Repository: googlecloudrobotics/core
Branch: main
Commit: 086c5c4d4aeb
Files: 462
Total size: 3.5 MB
Directory structure:
gitextract_mwr70acn/
├── .bazelignore
├── .bazelrc
├── .bazelversion
├── .dockerignore
├── .editorconfig
├── .github/
│ ├── ci/
│ │ ├── .bazelrc
│ │ ├── Dockerfile.integration-test-image
│ │ ├── common.sh
│ │ ├── deploy_navtest.sh
│ │ ├── deploy_navtest_cloudbuild.yaml
│ │ ├── deployments/
│ │ │ ├── robco-integration-test/
│ │ │ │ ├── config.sh
│ │ │ │ └── kubernetes/
│ │ │ │ └── k8s-relay-rollout.yaml
│ │ │ └── robco-navtest/
│ │ │ └── config.sh
│ │ ├── integration_test.sh
│ │ ├── integration_test_cloudbuild.yaml
│ │ ├── integration_test_image_builder.sh
│ │ ├── presubmit.sh
│ │ └── release_binary.sh
│ ├── dependabot.yml
│ └── workflows/
│ ├── check-bazel.yml
│ ├── postsubmit.yml
│ ├── presubmit.yml
│ └── release.yml
├── .gitignore
├── .pep8
├── BUILD.bazel
├── CONTRIBUTING.md
├── LICENSE
├── METADATA
├── MODULE.bazel
├── README.md
├── bazel/
│ ├── BUILD.bazel
│ ├── BUILD.sysroot
│ ├── app.bzl
│ ├── app_chart.bzl
│ ├── build_rules/
│ │ ├── app_chart/
│ │ │ ├── BUILD.bazel
│ │ │ ├── Chart.yaml.template
│ │ │ ├── cache_gcr_credentials.bzl
│ │ │ ├── cache_gcr_credentials.sh.tpl
│ │ │ ├── push_all.bzl
│ │ │ ├── push_all.sh.tpl
│ │ │ ├── run_parallel.bzl
│ │ │ ├── run_parallel.sh.tpl
│ │ │ ├── values-cloud.yaml
│ │ │ └── values-robot.yaml
│ │ ├── copy.bzl
│ │ ├── helm_chart.bzl
│ │ └── helm_template.bzl
│ ├── container_push.bzl
│ └── debug_repository.bzl
├── config.sh.tmpl
├── current_versions.txt
├── deploy.sh
├── docs/
│ ├── .gitignore
│ ├── _config.yml
│ ├── concepts/
│ │ ├── app-management.md
│ │ ├── config.md
│ │ ├── device_identity.md
│ │ └── federation.md
│ ├── developers/
│ │ └── debug-auth.md
│ ├── how-to/
│ │ ├── connecting-robot.md
│ │ ├── creating-declarative-api.md
│ │ ├── deploy-from-sources.md
│ │ ├── deploying-grpc-service.md
│ │ ├── deploying-service.md
│ │ ├── examples/
│ │ │ ├── charge-service/
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── charge-action.yaml
│ │ │ │ ├── charge-controller.yaml
│ │ │ │ ├── charge-crd.yaml
│ │ │ │ └── server.py
│ │ │ ├── greeter-service/
│ │ │ │ ├── Makefile
│ │ │ │ ├── client/
│ │ │ │ │ ├── Dockerfile
│ │ │ │ │ └── client.cc
│ │ │ │ ├── deploy.sh
│ │ │ │ ├── greeter-server.yaml.tmpl
│ │ │ │ ├── proto/
│ │ │ │ │ └── helloworld.proto
│ │ │ │ └── server/
│ │ │ │ ├── Dockerfile
│ │ │ │ └── server.cc
│ │ │ └── hello-service/
│ │ │ ├── client/
│ │ │ │ ├── Dockerfile
│ │ │ │ └── client.py
│ │ │ └── server/
│ │ │ ├── Dockerfile
│ │ │ ├── hello-server.yaml
│ │ │ └── server.py
│ │ ├── running-ros-node.md
│ │ ├── setting-up-oauth.md
│ │ └── using-cloud-storage.md
│ ├── index.md
│ ├── overview.md
│ └── quickstart.md
├── new_versions.txt
├── non_module_deps.bzl
├── nvchecker.toml
├── scripts/
│ ├── BUILD.bazel
│ ├── backup_robots.sh
│ ├── check-images.sh
│ ├── common.sh
│ ├── config.sh
│ ├── include-config.sh
│ ├── migrate.sh
│ ├── pre-commit
│ ├── robot-sim.sh
│ └── set-config.sh
├── src/
│ ├── .gitignore
│ ├── BUILD.bazel
│ ├── README.md
│ ├── app_charts/
│ │ ├── BUILD.bazel
│ │ ├── README.md
│ │ ├── akri/
│ │ │ ├── BUILD.bazel
│ │ │ ├── akri-robot.values.yaml
│ │ │ ├── robot/
│ │ │ │ └── akri.yaml
│ │ │ └── values-robot.yaml
│ │ ├── base/
│ │ │ ├── BUILD.bazel
│ │ │ ├── README.md
│ │ │ ├── app_management_test.sh
│ │ │ ├── cert-manager-cloud.values.yaml
│ │ │ ├── cert-manager-google-cas-issuer-cloud.values.yaml
│ │ │ ├── cert-manager-robot.values.yaml
│ │ │ ├── cloud/
│ │ │ │ ├── app-management-policy.yaml
│ │ │ │ ├── app-management.yaml
│ │ │ │ ├── apps-crd.yaml
│ │ │ │ ├── cert-ingress.yaml
│ │ │ │ ├── cert-manager-certificates.yaml
│ │ │ │ ├── cert-manager-google-cas-issuer.yaml
│ │ │ │ ├── cert-manager-issuers.yaml
│ │ │ │ ├── cert-manager.yaml
│ │ │ │ ├── cr-syncer-auth-webhook.yaml
│ │ │ │ ├── cr-syncer-policy.yaml
│ │ │ │ ├── domain-redirect.yaml
│ │ │ │ ├── fluentd-metrics.yaml
│ │ │ │ ├── kubernetes-api.yaml
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── nginx-ingress-controller-policy.yaml
│ │ │ │ ├── nginx-ingress-controller.yaml
│ │ │ │ ├── oauth2-proxy.yaml
│ │ │ │ ├── registry-crd.yaml
│ │ │ │ ├── registry-policy.yaml
│ │ │ │ ├── relay-dashboards.yaml
│ │ │ │ ├── token-vendor-app-fwd.yaml
│ │ │ │ └── token-vendor-rollout.yaml
│ │ │ ├── fluent-bit-helm.sh
│ │ │ ├── fluent-bit-values.yaml
│ │ │ ├── relay-dashboard.json
│ │ │ ├── robot/
│ │ │ │ ├── app-management.yaml
│ │ │ │ ├── cert-manager-certificates.yaml
│ │ │ │ ├── cert-manager-issuers.yaml
│ │ │ │ ├── cert-manager.yaml
│ │ │ │ ├── cr-syncer.yaml
│ │ │ │ ├── fluent-bit.yaml
│ │ │ │ ├── fluentd-gcp-addon.yaml
│ │ │ │ ├── fluentd-metrics.yaml
│ │ │ │ ├── gcr-credential-refresher.yaml
│ │ │ │ └── metadata-server.yaml
│ │ │ ├── values-cloud.yaml
│ │ │ └── values-robot.yaml
│ │ ├── k8s-relay/
│ │ │ ├── BUILD.bazel
│ │ │ ├── cloud/
│ │ │ │ ├── ingress.yaml
│ │ │ │ ├── kubernetes-relay-server.yaml
│ │ │ │ ├── service-monitor.yaml
│ │ │ │ └── service.yaml
│ │ │ ├── robot/
│ │ │ │ └── kubernetes-relay-client.yaml
│ │ │ ├── values-cloud.yaml
│ │ │ └── values-robot.yaml
│ │ ├── mission-crd/
│ │ │ ├── BUILD.bazel
│ │ │ ├── mission_crd.yaml
│ │ │ └── values.yaml
│ │ ├── platform-apps/
│ │ │ ├── BUILD.bazel
│ │ │ └── values.yaml
│ │ ├── prometheus/
│ │ │ ├── BUILD.bazel
│ │ │ ├── README.md
│ │ │ ├── cloud/
│ │ │ │ ├── app.yaml
│ │ │ │ ├── base-alerts.yaml
│ │ │ │ ├── federation-service-monitor.yaml
│ │ │ │ ├── grafana-ingress.yaml
│ │ │ │ ├── prometheus-ingress.yaml
│ │ │ │ ├── prometheus-operator.yaml
│ │ │ │ ├── prometheus-relay.yaml
│ │ │ │ └── storage-class.yaml
│ │ │ ├── prometheus-cloud.values.yaml
│ │ │ ├── prometheus-robot.values.yaml
│ │ │ ├── robot/
│ │ │ │ ├── hw-exporter.yaml
│ │ │ │ ├── prometheus-adapter.yaml
│ │ │ │ ├── prometheus-operator.yaml
│ │ │ │ ├── prometheus-relay-client.yaml
│ │ │ │ └── smartctl-exporter.yaml
│ │ │ ├── update_prometheus_adapter.sh
│ │ │ └── values-cloud.yaml
│ │ └── token-vendor/
│ │ ├── BUILD.bazel
│ │ ├── cloud/
│ │ │ ├── dashboard.yaml
│ │ │ ├── ingress.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ ├── service.yaml
│ │ │ ├── token-vendor-policy.yaml
│ │ │ └── token-vendor.yaml
│ │ └── dashboard.json
│ ├── bootstrap/
│ │ ├── cloud/
│ │ │ ├── BUILD.bazel
│ │ │ ├── INSTALL_FROM_BINARY
│ │ │ ├── run-install.sh
│ │ │ └── terraform/
│ │ │ ├── .gitignore
│ │ │ ├── BUILD.bazel
│ │ │ ├── README.md
│ │ │ ├── address.tf
│ │ │ ├── certificate-authority.tf
│ │ │ ├── cluster.tf
│ │ │ ├── dns.tf
│ │ │ ├── endpoints.tf
│ │ │ ├── gcs.tf
│ │ │ ├── input.tf
│ │ │ ├── logging.tf
│ │ │ ├── multi-cluster-ingress.tf
│ │ │ ├── output.tf
│ │ │ ├── project.tf
│ │ │ ├── provider.tf
│ │ │ ├── registry.tf
│ │ │ ├── service-account.tf
│ │ │ ├── versions.tf
│ │ │ ├── workload-identity.tf
│ │ │ └── www.yaml
│ │ └── robot/
│ │ ├── BUILD.bazel
│ │ └── setup_robot.sh
│ ├── go/
│ │ ├── cmd/
│ │ │ ├── app-rollout-controller/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ └── main.go
│ │ │ ├── chart-assignment-controller/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ └── main.go
│ │ │ ├── cr-syncer/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── health.go
│ │ │ │ ├── health_test.go
│ │ │ │ ├── main.go
│ │ │ │ ├── main_test.go
│ │ │ │ ├── syncer.go
│ │ │ │ └── syncer_test.go
│ │ │ ├── cr-syncer-auth-webhook/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── main.go
│ │ │ │ ├── request.go
│ │ │ │ └── request_test.go
│ │ │ ├── gcr-credential-refresher/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ └── main.go
│ │ │ ├── http-relay-client/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── client/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── client.go
│ │ │ │ │ └── client_test.go
│ │ │ │ └── main.go
│ │ │ ├── http-relay-server/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── README.md
│ │ │ │ ├── main.go
│ │ │ │ └── server/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── broker.go
│ │ │ │ ├── broker_test.go
│ │ │ │ ├── server.go
│ │ │ │ └── server_test.go
│ │ │ ├── hw-exporter/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── main.go
│ │ │ │ └── main_test.go
│ │ │ ├── metadata-server/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── coredns.go
│ │ │ │ ├── coredns_test.go
│ │ │ │ ├── main.go
│ │ │ │ ├── main_test.go
│ │ │ │ ├── metadata.go
│ │ │ │ ├── metadata_test.go
│ │ │ │ └── nftables.go
│ │ │ ├── setup-dev/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── main.go
│ │ │ │ └── setup-dev.md
│ │ │ ├── setup-robot/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── main.go
│ │ │ │ └── main_test.go
│ │ │ ├── synk/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── README.md
│ │ │ │ └── synk.go
│ │ │ └── token-vendor/
│ │ │ ├── BUILD.bazel
│ │ │ ├── README.md
│ │ │ ├── api/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── api.go
│ │ │ │ └── v1/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── testdata/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── cloudiot/
│ │ │ │ │ │ ├── describe_device.json
│ │ │ │ │ │ └── describe_device_expired_key.json
│ │ │ │ │ ├── rsa_cert.pem
│ │ │ │ │ └── rsa_private.pem
│ │ │ │ ├── v1.go
│ │ │ │ └── v1_test.go
│ │ │ ├── app/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── tokenvendor.go
│ │ │ │ └── tokenvendor_test.go
│ │ │ ├── main.go
│ │ │ ├── oauth/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── cache.go
│ │ │ │ ├── cache_test.go
│ │ │ │ ├── jwt/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── jwt.go
│ │ │ │ │ └── jwt_test.go
│ │ │ │ └── verifier.go
│ │ │ ├── repository/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── k8s/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── k8s.go
│ │ │ │ │ └── k8s_test.go
│ │ │ │ ├── memory/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── memory.go
│ │ │ │ │ └── memory_test.go
│ │ │ │ └── repository.go
│ │ │ ├── testdata/
│ │ │ │ ├── describe_device_a.json
│ │ │ │ ├── describe_device_b.json
│ │ │ │ ├── describe_device_b_blocked.json
│ │ │ │ └── list_devices.json
│ │ │ └── tokensource/
│ │ │ ├── BUILD.bazel
│ │ │ ├── gcp.go
│ │ │ └── gcp_test.go
│ │ ├── generate.sh
│ │ ├── pkg/
│ │ │ ├── apis/
│ │ │ │ ├── apps/
│ │ │ │ │ └── v1alpha1/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── doc.go
│ │ │ │ │ ├── register.go
│ │ │ │ │ ├── types.go
│ │ │ │ │ └── zz_generated.deepcopy.go
│ │ │ │ └── registry/
│ │ │ │ └── v1alpha1/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── doc.go
│ │ │ │ ├── register.go
│ │ │ │ ├── types.go
│ │ │ │ └── zz_generated.deepcopy.go
│ │ │ ├── client/
│ │ │ │ ├── informers/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── apps/
│ │ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ │ ├── interface.go
│ │ │ │ │ │ └── v1alpha1/
│ │ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ │ ├── app.go
│ │ │ │ │ │ ├── approllout.go
│ │ │ │ │ │ ├── chartassignment.go
│ │ │ │ │ │ ├── interface.go
│ │ │ │ │ │ └── resourceset.go
│ │ │ │ │ ├── factory.go
│ │ │ │ │ ├── generic.go
│ │ │ │ │ ├── internalinterfaces/
│ │ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ │ └── factory_interfaces.go
│ │ │ │ │ └── registry/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── interface.go
│ │ │ │ │ └── v1alpha1/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── interface.go
│ │ │ │ │ └── robot.go
│ │ │ │ ├── listers/
│ │ │ │ │ ├── apps/
│ │ │ │ │ │ └── v1alpha1/
│ │ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ │ ├── app.go
│ │ │ │ │ │ ├── approllout.go
│ │ │ │ │ │ ├── chartassignment.go
│ │ │ │ │ │ ├── expansion_generated.go
│ │ │ │ │ │ └── resourceset.go
│ │ │ │ │ └── registry/
│ │ │ │ │ └── v1alpha1/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── expansion_generated.go
│ │ │ │ │ └── robot.go
│ │ │ │ └── versioned/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── clientset.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── fake/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── clientset_generated.go
│ │ │ │ │ ├── doc.go
│ │ │ │ │ └── register.go
│ │ │ │ ├── scheme/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── doc.go
│ │ │ │ │ └── register.go
│ │ │ │ └── typed/
│ │ │ │ ├── apps/
│ │ │ │ │ └── v1alpha1/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── app.go
│ │ │ │ │ ├── approllout.go
│ │ │ │ │ ├── apps_client.go
│ │ │ │ │ ├── chartassignment.go
│ │ │ │ │ ├── doc.go
│ │ │ │ │ ├── fake/
│ │ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ │ ├── doc.go
│ │ │ │ │ │ ├── fake_app.go
│ │ │ │ │ │ ├── fake_approllout.go
│ │ │ │ │ │ ├── fake_apps_client.go
│ │ │ │ │ │ ├── fake_chartassignment.go
│ │ │ │ │ │ └── fake_resourceset.go
│ │ │ │ │ ├── generated_expansion.go
│ │ │ │ │ └── resourceset.go
│ │ │ │ └── registry/
│ │ │ │ └── v1alpha1/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── doc.go
│ │ │ │ ├── fake/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── doc.go
│ │ │ │ │ ├── fake_registry_client.go
│ │ │ │ │ └── fake_robot.go
│ │ │ │ ├── generated_expansion.go
│ │ │ │ ├── registry_client.go
│ │ │ │ └── robot.go
│ │ │ ├── configutil/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── config_reader.go
│ │ │ │ └── config_reader_test.go
│ │ │ ├── controller/
│ │ │ │ ├── approllout/
│ │ │ │ │ ├── BUILD.bazel
│ │ │ │ │ ├── controller.go
│ │ │ │ │ └── controller_test.go
│ │ │ │ └── chartassignment/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── controller.go
│ │ │ │ ├── release.go
│ │ │ │ ├── release_test.go
│ │ │ │ ├── validator.go
│ │ │ │ └── validator_test.go
│ │ │ ├── gcr/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── update_gcr_credential_test.go
│ │ │ │ └── update_gcr_credentials.go
│ │ │ ├── kubetest/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ └── kubetest.go
│ │ │ ├── kubeutils/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ └── kubeutils.go
│ │ │ ├── robotauth/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── robotauth.go
│ │ │ │ └── robotauth_test.go
│ │ │ ├── setup/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── setupcommon.go
│ │ │ │ ├── setupcommon_test.go
│ │ │ │ └── util/
│ │ │ │ ├── BUILD.bazel
│ │ │ │ ├── factory.go
│ │ │ │ └── fake.go
│ │ │ └── synk/
│ │ │ ├── BUILD.bazel
│ │ │ ├── interface.go
│ │ │ ├── sort.go
│ │ │ ├── sort_test.go
│ │ │ ├── synk.go
│ │ │ └── synk_test.go
│ │ └── tests/
│ │ ├── BUILD.bazel
│ │ ├── apps/
│ │ │ ├── BUILD.bazel
│ │ │ ├── apps_test.go
│ │ │ └── run.sh
│ │ ├── k8s_integration_test.go
│ │ ├── k8s_integration_test_auth_helper.go
│ │ ├── relay/
│ │ │ ├── BUILD.bazel
│ │ │ ├── in_process_relay_test.go
│ │ │ └── nok8s_relay_test.go
│ │ ├── relay-bench.sh
│ │ └── relay_test.sh
│ ├── go.mod
│ ├── go.sum
│ ├── gomod.sh
│ └── proto/
│ └── http-relay/
│ ├── BUILD.bazel
│ ├── http_over_rpc.proto
│ └── unused.go
└── third_party/
├── BUILD
├── BUILD.bazel
├── README.md
├── akri/
│ ├── BUILD.bazel
│ ├── akri-0.12.9.tgz
│ ├── akri-configuration-crd.yaml
│ ├── akri-instance-crd.yaml
│ └── update-akri.sh
├── app_crd.BUILD
├── cert-manager/
│ ├── BUILD.bazel
│ └── cert-manager-v1.16.3.tgz
├── cert-manager-google-cas-issuer/
│ ├── BUILD.bazel
│ └── cert-manager-google-cas-issuer-v0.6.2.tgz
├── fluentd_gcp_addon/
│ ├── BUILD.bazel
│ ├── fluentd-gcp-configmap.yaml
│ └── fluentd-gcp-ds.yaml
├── helm2/
│ └── BUILD.bazel
├── helm3/
│ └── BUILD.bazel
├── ingress-nginx.BUILD
├── kube-prometheus-stack/
│ ├── 00-crds.yaml
│ ├── 01-crds.yaml
│ ├── BUILD.bazel
│ ├── kube-prometheus-stack-72.9.1.tgz
│ └── update_crd.sh
├── kubernetes_proto/
│ ├── meta/
│ │ ├── BUILD.bazel
│ │ ├── README.md
│ │ └── generated.proto
│ ├── runtime/
│ │ ├── BUILD.bazel
│ │ └── generated.proto
│ └── schema/
│ ├── BUILD.bazel
│ └── generated.proto
└── terraform.BUILD
================================================
FILE CONTENTS
================================================
================================================
FILE: .bazelignore
================================================
src/.gopath
================================================
FILE: .bazelrc
================================================
# Enable Bzlmod for every Bazel command
common --enable_bzlmod
# Work around go issue with LLVM 15+: https://github.com/bazelbuild/rules_go/issues/3691#issuecomment-2263999685
build --@io_bazel_rules_go//go/config:linkmode=pie
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote_cache cache hit rate and the
# correctness and repeatability of the build.
build --incompatible_strict_action_env=true
# Make sure that no regressions are introduced until the flag is flipped
# See: https://github.com/bazelbuild/bazel/issues/8195
build --incompatible_disallow_empty_glob
# Use the new paths.
# https://github.com/bazelbuild/bazel/issues/23127
common --incompatible_use_plus_in_repo_names
# Always use the pre-configured toolchain.
build --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# Set a higher timeout value, just in case.
build --remote_timeout=3600
# Platform flags
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
build:linux_x86_64 --extra_execution_platforms=//bazel:linux_x86_64
build:linux_x86_64 --host_platform=//bazel:linux_x86_64
build:linux_x86_64 --platforms=//bazel:linux_x86_64
build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:remote_cache --remote_cache=grpcs://remotebuildexecution.googleapis.com
# Enable authentication. This will pick up application default credentials by
# default. You can use --google_credentials=some_file.json to use a service
# account credential instead.
build:remote --google_default_credentials=true
build:remote_cache --google_default_credentials=true
# RBE builds only support linux_x86_64.
build:remote --config=linux_x86_64
build:remote_cache --config=linux_x86_64
# Don't run integration tests and tests that need docker by default
test --test_tag_filters="-external,-requires-docker"
================================================
FILE: .bazelversion
================================================
8.4.2
================================================
FILE: .dockerignore
================================================
*
================================================
FILE: .editorconfig
================================================
# Editor configuration, see http://editorconfig.org
root = true
[*]
charset = utf-8
indent_style = space
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
indent_size = 8
[*.md]
max_line_length = off
trim_trailing_whitespace = false
================================================
FILE: .github/ci/.bazelrc
================================================
# Bazel config for CI/CD builds.
# Default to keep going
build --keep_going
# Use rbe remote execution and caching on robco-integration-test.
build --config=remote
build --remote_instance_name=projects/robco-integration-test/instances/default_instance
build --google_default_credentials=true
# Slightly higher than the number of available remote workers (10 in default_instance).
# This has not been tuned a lot.
build --jobs=12
# No need to download every intermediate output to the local runner.
build --remote_download_toplevel
# Use Result Store to store build and test logs.
build --bes_backend=buildeventservice.googleapis.com
build --bes_results_url=https://source.cloud.google.com/results/invocations
build --bes_timeout=600s
build --bes_instance_name=robco-integration-test
# Try to mitigate DEADLINE_EXCEEDED errors (b/346715839).
# Remove experimental_ prefix when updating Bazel.
build --experimental_build_event_upload_max_retries=8
================================================
FILE: .github/ci/Dockerfile.integration-test-image
================================================
# Image used for integration_test.sh on Cloud Build.
# Allows access to GKE and to run Bazel commands.
FROM gcr.io/cloud-builders/kubectl
# Install Bazelisk and expose it as `bazel` so scripts can invoke either name.
RUN \
VERSION="v1.21.0" && \
curl -L https://github.com/bazelbuild/bazelisk/releases/download/${VERSION}/bazelisk-linux-amd64 --output /usr/bin/bazelisk && \
chmod +x /usr/bin/bazelisk && \
ln -s /usr/bin/bazelisk /usr/bin/bazel
# /output must be world-writable because the build later runs as a non-root user.
RUN mkdir -p /builder /output /workspace && chmod -R 777 /output
# rules_python is not happy if bazel runs as root so create a new user
# https://github.com/bazelbuild/rules_python/pull/713
# https://github.com/GoogleCloudPlatform/cloud-builders/issues/641
RUN adduser builder --disabled-password
# Allow running sudo without password
# Add libtinfo5, which is required locally until we can upgrade to LLVM 19
RUN apt-get update && apt-get install -y sudo libtinfo5 && apt-get clean && rm -rf /var/lib/apt/lists/* && \
usermod -aG sudo builder && \
echo "builder ALL=(ALL) NOPASSWD:ALL" > "/etc/sudoers.d/builder" && chmod 440 "/etc/sudoers.d/builder"
# For some reason //src/go/tests:go_default_test is expecting
# the kubeconfig in /home/builder/.kube/config, i.e. it does not use $HOME
# (which is /builder/home). alexanderfaxa@ could not figure out why so just
# symlink the config there.
RUN mkdir -p /home/builder/.kube && \
ln -s /builder/home/.kube/config /home/builder/.kube/config
USER builder
================================================
FILE: .github/ci/common.sh
================================================
#!/bin/bash
# Format for the xtrace lines
export 'PS4=+$(date --rfc-3339=seconds):${BASH_SOURCE}:${LINENO}: '
set -o errexit # exit immediately, if a pipeline command fails
set -o pipefail # returns the last command to exit with a non-zero status
set -o xtrace # print command traces before executing command
RUNFILES_ROOT="_main"
# Run bazelisk with the shared CI bazelrc so every invocation in these
# scripts picks up the same CI configuration. All arguments are forwarded.
bazel_ci() {
  bazelisk --bazelrc="${DIR}/.bazelrc" "$@"
}
generate_build_id() {
  # A good build identifier is unique, survives multiple dailies on the
  # same day, and sorts textually so that newer releases come last.
  # Date plus a short commit hash from $GITHUB_SHA satisfies all three.
  git_hash="${GITHUB_SHA:0:6}"
  date "+daily-%Y-%m-%d-${git_hash}"
}
# Pushes images and releases a binary to a specified bucket.
# bucket: target GCS bucket to release to
# name: name of the release tar ball
# labels: optional list of filename aliases for the release, these are one-line
# text files with the release name as a bucket local path
# Requires: DOCKER_TAG, CLOUD_ROBOTICS_CONTAINER_REGISTRY to be set by the caller.
function release_binary {
local bucket="$1"
local name="$2"
# This function is called from test and release pipelines. We (re)build the binary and push the
# app images here to ensure the app images which are referenced in the binary exist in the
# registry.
bazel_ci build \
//src/bootstrap/cloud:crc-binary \
//src/app_charts:push \
//src/go/cmd/setup-robot:setup-robot.push
# The push script depends on binaries in the runfiles, so run it from the
# runfiles directory and restore the working directory afterwards.
local oldPwd
oldPwd=$(pwd)
# The tag variable must be called 'TAG', see cloud-robotics/bazel/container_push.bzl
for t in latest ${DOCKER_TAG}; do
cd ${oldPwd}/bazel-bin/src/go/cmd/setup-robot/push_setup-robot.push.sh.runfiles/${RUNFILES_ROOT}
${oldPwd}/bazel-bin/src/go/cmd/setup-robot/push_setup-robot.push.sh \
--repository="${CLOUD_ROBOTICS_CONTAINER_REGISTRY}/setup-robot" \
--tag="${t}"
cd ${oldPwd}/bazel-bin/src/app_charts/push.runfiles/${RUNFILES_ROOT}
TAG="$t" ${oldPwd}/bazel-bin/src/app_charts/push "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}"
done
cd ${oldPwd}
# Upload the release tarball under its versioned name.
gcloud storage cp \
--predefined-acl=publicRead \
bazel-bin/src/bootstrap/cloud/crc-binary.tar.gz \
"gs://${bucket}/${name}.tar.gz"
# Overwrite cache control as we want changes to run-install.sh and version files to be visible
# right away.
gcloud storage cp \
--predefined-acl=publicRead \
--cache-control="private, max-age=0, no-transform" \
src/bootstrap/cloud/run-install.sh \
"gs://${bucket}/"
# The remaining arguments are version labels. GCS does not support symlinks, so we use version
# files instead.
local vfile
vfile=$(mktemp)
echo "${name}.tar.gz" >${vfile}
shift 2
# Loop over remaining args in $* and create alias files.
for label; do
gcloud storage cp \
--predefined-acl=publicRead \
--cache-control="private, max-age=0, no-transform" \
${vfile} "gs://${bucket}/${label}"
done
}
================================================
FILE: .github/ci/deploy_navtest.sh
================================================
#!/bin/bash
# Deploys the latest successfully tested binary release to the robco-navtest
# project by fetching and running its run-install.sh from the CI bucket.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${DIR}/common.sh"
PROJECT_DIR="${DIR}/deployments/robco-navtest"
source "${PROJECT_DIR}/config.sh"
# TODO(skopecki) These variables should be declared in the run-install.sh and removed from this script.
export BUCKET_URI="https://storage.googleapis.com/robco-ci-binary-builds"
export SOURCE_CONTAINER_REGISTRY="gcr.io/robco-team"
# Deploy the binary release that was pushed by the last successful integration test.
curl --silent --show-error --fail "${BUCKET_URI}/run-install.sh" \
| bash -x -s -- ${GCP_PROJECT_ID}
================================================
FILE: .github/ci/deploy_navtest_cloudbuild.yaml
================================================
# Call deploy_navtest.sh on Cloud Build.
# TODO(b/323509860): Run directly on the Action runner when it supports WIF.
steps:
- name: "gcr.io/cloud-builders/gcloud"
entrypoint: "bash"
args: ["./.github/ci/deploy_navtest.sh"]
timeout: 1200s
================================================
FILE: .github/ci/deployments/robco-integration-test/config.sh
================================================
#!/usr/bin/env bash
# Enable cloud robotics layer 2
APP_MANAGEMENT=true
GCP_PROJECT_ID=robco-integration-test
GCP_REGION=europe-west1
GCP_ZONE=europe-west1-c
CLOUD_ROBOTICS_SHARED_OWNER_GROUP=cloud-robotics-cloud-owner-acl@twosync.google.com
CLOUD_ROBOTICS_DEPLOY_ENVIRONMENT=GCP-testing
TERRAFORM_GCS_BUCKET="robco-team-terraform-state"
TERRAFORM_GCS_PREFIX="state/${GCP_PROJECT_ID}"
CLOUD_ROBOTICS_CONTAINER_REGISTRY=gcr.io/robco-team
PRIVATE_DOCKER_PROJECTS=robco-team
CLOUD_ROBOTICS_CTX=gke_robco-integration-test_europe-west1-c_cloud-robotics
================================================
FILE: .github/ci/deployments/robco-integration-test/kubernetes/k8s-relay-rollout.yaml
================================================
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: AppRollout
metadata:
name: k8s-relay
labels:
app: k8s-relay
spec:
appName: k8s-relay-dev
cloud: {}
robots:
- selector:
any: true
================================================
FILE: .github/ci/deployments/robco-navtest/config.sh
================================================
#!/usr/bin/env bash
# Enable google cloud robotics layer 2
APP_MANAGEMENT=true
GCP_PROJECT_ID=robco-navtest
GCP_REGION=europe-west1
GCP_ZONE=europe-west1-c
CLOUD_ROBOTICS_SHARED_OWNER_GROUP=cloud-robotics-cloud-owner-acl@twosync.google.com
TERRAFORM_GCS_BUCKET="robco-team-terraform-state"
TERRAFORM_GCS_PREFIX="state/${GCP_PROJECT_ID}"
CLOUD_ROBOTICS_CONTAINER_REGISTRY=gcr.io/robco-team
PRIVATE_DOCKER_PROJECTS=robco-team
CLOUD_ROBOTICS_CTX=gke_robco-navtest_europe-west1-c_cloud-robotics
================================================
FILE: .github/ci/integration_test.sh
================================================
#!/bin/bash
# Integration test for the robco-integration-test project: deploys the cloud
# layer, creates a simulated robot cluster, runs the "external" Bazel tests
# against it, and (on main) promotes the build as the `latest` binary release.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${DIR}/common.sh"
source "./scripts/common.sh"
# Because the format from common.sh is not recognized by Cloud Build.
export 'PS4='
# Need to source the project config from here
PROJECT_DIR="${DIR}/deployments/robco-integration-test"
source "${PROJECT_DIR}/config.sh"
gcloud config set project ${GCP_PROJECT_ID}
gke_get_credentials "${GCP_PROJECT_ID}" "cloud-robotics" "${GCP_REGION}" "${GCP_ZONE}"
BUILD_IDENTIFIER=$(generate_build_id)
echo "INFO: Build identifier is $BUILD_IDENTIFIER"
export BAZEL_FLAGS="--bazelrc=${DIR}/.bazelrc"
# Deploy/update the cloud installation for this project.
bash -x ./deploy.sh update "${GCP_PROJECT_ID}"
# Create a GKE cluster with a single robot for the relay test.
ROBOT_RELAY_CLUSTER="relay-test"
export SKIP_LOCAL_PULL=true
bash -x ./scripts/robot-sim.sh create "${GCP_PROJECT_ID}" "${ROBOT_RELAY_CLUSTER}"
bazel_ci run //src/go/cmd/setup-dev -- --project="${GCP_PROJECT_ID}" --robot-name="${ROBOT_RELAY_CLUSTER}"
DOMAIN=${CLOUD_ROBOTICS_DOMAIN:-"www.endpoints.${GCP_PROJECT_ID}.cloud.goog"}
ROBOT_CONTEXT="gke_${GCP_PROJECT_ID}_${GCP_ZONE}_${ROBOT_RELAY_CLUSTER}"
# Output state of cloud and robot k8s context to inspect the health of pods.
# `|| true` keeps the script going if a context is missing; these are
# diagnostics only.
kubectl config get-contexts || true
kubectl --context ${CLOUD_ROBOTICS_CTX} get pods || true
kubectl --context ${GCP_PROJECT_ID}-robot get pods || true
kubectl --context ${ROBOT_CONTEXT} get pods || true
# Run only tests tagged "external"; TestRunner=standalone keeps them on this
# runner (not on remote workers), which is required for cluster access.
bazel_ci test \
--test_env GCP_PROJECT_ID=${GCP_PROJECT_ID} \
--test_env GCP_REGION=${GCP_REGION} \
--test_env GCP_ZONE=${GCP_ZONE} \
--test_env CLUSTER=${ROBOT_RELAY_CLUSTER} \
--test_env PATH=$PATH \
--jvmopt="-DCLOUD_ROBOTICS_DOMAIN=${DOMAIN}" \
--test_output=streamed \
--test_tag_filters="external" \
--strategy=TestRunner=standalone \
//...
# If this is running on main (ie, not a manual run) then update the `latest`
# binary.
if [[ "$MANUAL_RUN" == "false" ]] ; then
release_binary "robco-ci-binary-builds" "crc-${BUILD_IDENTIFIER}" "latest"
fi
================================================
FILE: .github/ci/integration_test_cloudbuild.yaml
================================================
# A Cloud Build job for running integration_test.sh.
# TODO(b/323509860): Run directly on the Action runner when it supports WIF.
steps:
# Needed for cloud build to allow running Bazel as non-root, see
# https://github.com/GoogleCloudPlatform/cloud-builders/issues/641#issuecomment-604599102
# Not part of the Dockerfile since the chmod layer adds significant image size.
- name: ubuntu
entrypoint: "bash"
args: ["-c", "chmod -R 777 /builder && chmod -R 777 /workspace"]
# This runs on a custom image that has kubectl, gcloud and bazel installed.
# See Dockerfile.integration-test-image.
- name: "gcr.io/robco-integration-test/integration-test-image@sha256:87e7cde1d2923eed014ee8ee0c365d683fa7a8a99985bbc77acb951c2e6faefc"
entrypoint: "bash"
args: ["./.github/ci/integration_test.sh"]
env:
- "GITHUB_SHA=${_GITHUB_SHA}"
- "MANUAL_RUN=${_MANUAL_RUN}"
substitutions:
_GITHUB_SHA: ""
_MANUAL_RUN: ""
options:
dynamicSubstitutions: true
substitutionOption: "MUST_MATCH"
timeout: 1800s
================================================
FILE: .github/ci/integration_test_image_builder.sh
================================================
#!/bin/bash
# Builds and pushes a docker image that can be used in Cloud Build to run
# the integration test (see integration_test_cloudbuild.yaml).
#
# To be manually invoked and the resulting sha256 copied to
# integration_test_cloudbuild.yaml after changing the Dockerfile.
set -euo pipefail
NAME="gcr.io/robco-integration-test/integration-test-image"
# Build from stdin (`-`) so no build context is uploaded.
docker build --network=host -t "${NAME}" - \
< .github/ci/Dockerfile.integration-test-image
docker push "${NAME}"
================================================
FILE: .github/ci/presubmit.sh
================================================
#!/bin/bash
#
# Presubmit script for testing cloud robotics.
# Expected to run remotely on a GitHub Actions runner, not locally.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck source=ci/common.sh
source "${DIR}/common.sh"
echo "Timestamp: build started"
# First pass with --nobuild only fetches build dependencies; this separates
# fetch time from build time in the logs.
bazel_ci build --nobuild //...
echo "Timestamp: build-deps fetched"
bazel_ci build //...
echo "Timestamp: build done"
bazel_ci test --test_output=errors //...
echo "Timestamp: test done"
# Some of the tests below pull Docker images from the repository. We need to
# make sure they are pushed and provide an access token.
gcloud auth configure-docker --quiet
REGISTRY="gcr.io/robco-integration-test"
TAG="latest" bazel_ci run \
//src/app_charts:push "${REGISTRY}"
# We're running into timeouts at CI and also don't see the actual failure
# reasons. Disable the test for now until someone has time and ideas how to
# resurrect it.
#
# set +o xtrace # Don't put the access token in the logs.
# ACCESS_TOKEN="$(gcloud auth application-default print-access-token)"
# Note: --strategy=TestRunner=standalone means that the tests are run locally
# and not on a remote worker (which does not have the Docker environment).
# bazel_ci test \
# --flaky_test_attempts 3 \
# --test_env ACCESS_TOKEN="${ACCESS_TOKEN}" \
# --test_env REGISTRY="${REGISTRY}" \
# --test_tag_filters="requires-docker" \
# --test_output=errors \
# --strategy=TestRunner=standalone //src/go/tests/apps:go_default_test
#
# set -o xtrace
echo "Timestamp: presubmit.sh done"
================================================
FILE: .github/ci/release_binary.sh
================================================
#!/bin/bash
# Creates a versioned binary release: pushes images and the release tarball via
# release_binary (common.sh), then creates a matching GitHub release with
# generated notes. Requires FULL_SHA, GITHUB_TOKEN and REPO in the environment.
set -euo pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck source=ci/common.sh
source "${DIR}/common.sh"
gcloud auth configure-docker --quiet
# Set defaults used for the release, they can only be overridden when testing
# manually.
GCP_BUCKET=${GCP_BUCKET:-"cloud-robotics-releases"}
VERSION=${VERSION:-"0.1.0"}
SHA=$(git rev-parse --short "$FULL_SHA")
RELEASE_NAME="v$VERSION-$SHA"
# TAG is a global variable that is used in the container push rules.
export TAG="crc-${VERSION}-${SHA}"
LABELS=${LABELS:-"latest crc-${VERSION}/crc-${VERSION}+latest"}
# Get the last release. We only create a new release if the main branch has moved since
# as trying to re-create an existing release is an error.
output=$(curl --fail-with-body -sS \
-H "Accept: application/vnd.github+json" \
-H "Authorization: token $GITHUB_TOKEN" \
https://api.github.com/repos/$REPO/releases/latest)
PREVIOUS_RELEASE_NAME="$(jq -r '.tag_name' <<< $output)"
if [ "$RELEASE_NAME" = "$PREVIOUS_RELEASE_NAME" ]; then
echo "Release $RELEASE_NAME already exists. Nothing more to do."
exit 0
else
echo "Previous release is $PREVIOUS_RELEASE_NAME"
fi
CLOUD_ROBOTICS_CONTAINER_REGISTRY="gcr.io/cloud-robotics-releases"
# DOCKER_TAG is a global variable that is used in release_binary.
DOCKER_TAG=${DOCKER_TAG:-"crc-${VERSION}-${SHA}"}
# LABELS is deliberately unquoted so each label becomes a separate argument.
release_binary "${GCP_BUCKET}" "crc-${VERSION}/crc-${VERSION}+${SHA}" ${LABELS}
# Generate release notes comparing against the previous release.
output=$(curl --fail-with-body -sS \
-X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: token $GITHUB_TOKEN" \
https://api.github.com/repos/$REPO/releases/generate-notes \
-d '{"tag_name":"'$RELEASE_NAME'","previous_tag_name":"'$PREVIOUS_RELEASE_NAME'"}')
# Code newlines as literal \n and escape double quotes to generate valid JSON.
BODY="$(jq -r '.body' <<< $output | awk '{printf "%s\\n", $0}' | sed 's/"/\\"/g')"
echo "Generated release notes for $RELEASE_NAME"
# Create the release on GitHub.
curl --fail-with-body -sS \
-X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: token $GITHUB_TOKEN" \
https://api.github.com/repos/$REPO/releases \
--data-binary @- << EOF
{
"tag_name": "$RELEASE_NAME",
"name": "$RELEASE_NAME",
"body": "$BODY"
}
EOF
================================================
FILE: .github/dependabot.yml
================================================
# Dependabot configuration: open weekly update PRs for each of the three
# package ecosystems used in this repository.
version: 2
updates:
  # Go modules (go.mod lives under /src).
  - package-ecosystem: "gomod"
    directory: "/src/"
    schedule:
      interval: "weekly"
  # Actions referenced from .github/workflows.
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
  # bazel_dep entries in MODULE.bazel.
  - package-ecosystem: "bazel"
    directory: "/"
    schedule:
      interval: "weekly"
================================================
FILE: .github/workflows/check-bazel.yml
================================================
# Reusable workflow (invoked via workflow_call from presubmit.yml and
# postsubmit.yml) that builds and tests all Bazel targets by running
# .github/ci/presubmit.sh.
name: Check Bazel
on:
  workflow_call:
permissions:
  contents: read
  # Required for keyless GCP authentication via Workload Identity Federation.
  id-token: write
jobs:
  check-bazel:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2
      - name: Auth
        uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # tag=v3.0.0
        with:
          create_credentials_file: true # also sets GOOGLE_APPLICATION_CREDENTIALS
          service_account: "github-automation-bot@gha-crc-dev.iam.gserviceaccount.com"
          workload_identity_provider: "projects/1043719249528/locations/global/workloadIdentityPools/github-automation/providers/crc-dev"
      # Auth fails for PRs from forks, which cannot use the org's identity
      # pool; explain why instead of failing with an opaque error.
      - name: Print error on auth fail
        if: failure()
        run: |
          echo >&2 "This PR appears to be from a fork or authored by a non-org member, rather than from the primary repo."
          echo >&2 "This means it can't run the presubmit, which requires access to GCR."
          echo >&2 "If you are a project member, please push your branch to github.com/googlecloudrobotics/core instead."
          exit 1
      - name: Run .github/ci/presubmit.sh
        run: ./.github/ci/presubmit.sh
      # Dump the Bazel server log for diagnostics regardless of build outcome,
      # but never fail the job if the log is missing.
      - name: Get bazel server logs
        if: success() || failure()
        run: cat ~/.cache/bazel/_bazel_*/*/java.log || true
================================================
FILE: .github/workflows/postsubmit.yml
================================================
# Nightly postsubmit: runs the Bazel checks and submits the integration test
# to Cloud Build.
name: Postsubmit
on:
  schedule:
    - cron: "0 4 * * *" # Once a day at 4am.
  # Manual runs through Actions tab in the UI
  workflow_dispatch:
    inputs:
      force-binary-release:
        description: >-
          force-binary-release: Set to non-empty when running from main to
          create a binary release that can be used by 'Create release'.
permissions:
  contents: read
  # Required for keyless GCP authentication via Workload Identity Federation.
  id-token: write
# Only one run at a time may touch the shared integration-test project.
concurrency:
  group: integration_test
  cancel-in-progress: true
jobs:
  call-bazel:
    uses: ./.github/workflows/check-bazel.yml
  integration-test:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2
      - name: Auth
        uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # tag=v3.0.0
        with:
          create_credentials_file: true # also sets GOOGLE_APPLICATION_CREDENTIALS
          service_account: "github-automation-bot@gha-crc-dev.iam.gserviceaccount.com"
          workload_identity_provider: "projects/1043719249528/locations/global/workloadIdentityPools/github-automation/providers/crc-dev"
      - name: Run integration_test.sh on Cloud Build
        env:
          # True for a manual run that did not ask for a binary release; the
          # Cloud Build config uses this substitution to skip release steps.
          MANUAL_RUN: "${{ github.event_name == 'workflow_dispatch' && inputs.force-binary-release == '' }}"
        run: |
          gcloud builds submit \
            --project robco-integration-test \
            --region europe-west1 \
            --config .github/ci/integration_test_cloudbuild.yaml \
            --substitutions _GITHUB_SHA=${GITHUB_SHA},_MANUAL_RUN=${MANUAL_RUN}
================================================
FILE: .github/workflows/presubmit.yml
================================================
# Presubmit for pull requests against main. To avoid re-running the expensive
# Bazel checks for an unchanged change set (e.g. after a rebase that touches
# no files), a digest of the changed files is cached on success and consulted
# before scheduling the heavy jobs.
name: Presubmit
on:
  pull_request:
    branches: ["main"]
  workflow_dispatch:
permissions:
  contents: read
  # Required for keyless GCP authentication in the called check-bazel workflow.
  id-token: write
  pull-requests: read
# Cancel previous runs if a new one is started.
concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true
jobs:
  setup-presubmit:
    runs-on: ubuntu-22.04
    outputs:
      presubmit_digest: ${{ steps.pr-digest.outputs.digest }}
      presubmit_status: ${{ steps.status.outputs.status }}
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2
      # Compute a content digest over the PR's changed files (names + tarred
      # contents, normalized for reproducibility).
      - name: Get PR digest
        id: pr-digest
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          # Get the list of changed files in the PR.
          gh pr view ${{github.event.number}} --json files -q '.files[].path' > /tmp/changed_files.txt
          # Create a tarball of the changed files and compute its SHA256.
          # --ignore-failed-read to not fail on deleted files.
          tar -cvf /tmp/changed_files.tar \
            --owner=root --group=root --numeric-owner --mtime="2010-01-01" --sort=name \
            -T /tmp/changed_files.txt \
            --ignore-failed-read
          digest=$(cat /tmp/changed_files.txt /tmp/changed_files.tar | sha256sum | cut -d " " -f1)
          echo "digest=$digest" >> $GITHUB_OUTPUT
      - uses: actions/cache/restore@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5
        with:
          path: ~/PRESUBMITS_SUCCEEDED
          key: PRESUBMITS_SUCCEEDED-${{ steps.pr-digest.outputs.digest }}
      # A cache hit means this exact change set already passed presubmit.
      - name: Check for previous runs
        id: status
        run: |
          if [ -f ~/PRESUBMITS_SUCCEEDED ]; then
            echo "status=success" >> $GITHUB_OUTPUT
          fi
  call-check-bazel:
    needs: [setup-presubmit]
    if: ${{ needs.setup-presubmit.outputs.presubmit_status != 'success' }}
    uses: ./.github/workflows/check-bazel.yml
  presubmits-ok:
    needs: [setup-presubmit, call-check-bazel]
    runs-on: ubuntu-22.04
    # To ensure this job always runs even if the "heavy" jobs were skipped.
    # This allows us to guard merging on this check in Branch Protection.
    if: ${{ always() }}
    steps:
      - name: Fail if tests failed
        # always() because GitHub requires a status macro to be included or else this gets skipped.
        # https://github.com/actions/runner/issues/491#issuecomment-850884422
        if: ${{ always() && (contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')) }}
        run: exit 1
      # Only reached when nothing failed: record success for this digest so
      # identical re-runs can be skipped.
      - name: Create presubmits succeeded marker
        run: touch ~/PRESUBMITS_SUCCEEDED
      - uses: actions/cache/save@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5
        with:
          path: ~/PRESUBMITS_SUCCEEDED
          key: PRESUBMITS_SUCCEEDED-${{ needs.setup-presubmit.outputs.presubmit_digest }}
================================================
FILE: .github/workflows/release.yml
================================================
# Create release: once a day (or on manual dispatch), find the latest commit
# on main with a green scheduled postsubmit, redeploy the navtest environment
# from it, and publish a GitHub release via release_binary.sh.
name: Create release
on:
  schedule:
    - cron: "0 5 * * *" # Once a day at 5am.
  # Manual runs through Actions tab in the UI
  workflow_dispatch:
permissions:
  actions: read
  contents: write
  id-token: write
  pull-requests: read
# Cancel previous runs if a new one is started.
concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true
jobs:
  create_release:
    runs-on: ubuntu-22.04
    steps:
      # Check out repo at latest green postsubmit commit on the main branch.
      - name: Get latest passing commit
        id: latest-green
        env:
          REPO: ${{ github.repository }}
        run: |
          set -euo pipefail
          output=$(curl --fail-with-body -sS \
            -H "Accept: application/vnd.github+json" \
            "https://api.github.com/repos/$REPO/actions/workflows/postsubmit.yml/runs?per_page=1&branch=main&event=schedule&status=success")
          repo_id=$(jq -r '.workflow_runs[0].head_repository.id' <<< $output)
          # Guard against accidentally releasing a commit from a fork.
          if [[ "${repo_id}" != "${{ github.repository_id }}" ]] ; then
            echo >&2 "Unexpected head repository ID: ${repo_id} - check postsubmit.yml configuration"
            exit 1
          fi
          sha=$(jq -r '.workflow_runs[0].head_sha' <<< $output)
          echo "latest_green=$sha" >> $GITHUB_OUTPUT
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # tag=v6.0.2
        with:
          ref: ${{ steps.latest-green.outputs.latest_green }}
      - name: Auth
        uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # tag=v3.0.0
        with:
          create_credentials_file: true # also sets GOOGLE_APPLICATION_CREDENTIALS
          service_account: "github-automation-bot@gha-crc-prod.iam.gserviceaccount.com"
          workload_identity_provider: "projects/695270090783/locations/global/workloadIdentityPools/github-automation/providers/crc-prod"
      - name: "Set up gcloud"
        uses: "google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db" # tag=v3.0.1
        with:
          skip_install: true
      - name: Deploy Navtest on Cloud Build
        run: |
          gcloud builds submit \
            --project robco-navtest \
            --config .github/ci/deploy_navtest_cloudbuild.yaml
      # Now we are ready to create the release.
      - name: Run release_binary.sh
        env:
          REPO: ${{ github.repository }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          FULL_SHA: ${{ steps.latest-green.outputs.latest_green }}
        run: ./.github/ci/release_binary.sh
================================================
FILE: .gitignore
================================================
.cache/
*.pyc
env/
# auto-generated by scripts/backport_kubeadm.sh
nsenter
# IntelliJ files
.project/
# Bazel
bazel-*
buildprofile.out
buildprofile.out.html
================================================
FILE: .pep8
================================================
[pep8]
aggressive=2
indent-size=2
max-line-length=80
================================================
FILE: BUILD.bazel
================================================
# Description:
#   Root BUILD file for cloud-robotics

load("@bazel_gazelle//:def.bzl", "gazelle")

package(default_visibility = ["//visibility:public"])

# Make the deploy entry points usable from other packages' rules.
exports_files([
    "config.sh.tmpl",
    "deploy.sh",
])

# Gazelle uses this to build importpath attributes.
# gazelle:prefix github.com/googlecloudrobotics/core

# Gazelle is used to generate BUILD.bazel files for WORKSPACE dependencies.
# Running this manually via "bazel run //:gazelle" will regenerate BUILD.bazel
# files that contain go-rules.
gazelle(
    name = "gazelle",
)

# Libraries are named go_default_library, tests are named go_default_test.
# gazelle:go_naming_convention go_default_library

# We ignore the build files generated by bazel-deps as it doesn't use buildifier.
# gazelle:exclude third_party

# Also ignore the Go sources downloaded by src/go/deps.sh.
# gazelle:exclude src/.gopath

# Don't create build files for these examples.
# gazelle:exclude docs/how-to/examples/greeter-service/proto/
================================================
FILE: CONTRIBUTING.md
================================================
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Code formatting
We have a pre-commit hook to check code formatting, which you can install with:
```
ln -s ../../scripts/pre-commit .git/hooks/
```
It depends on external tools for formatting, which you may be prompted to
install when it first runs.
## Community Guidelines
This project follows
[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: METADATA
================================================
name: "Cloud Robotics"
third_party {
# https://nvd.nist.gov/products/cpe/search
security {
tag: "NVD-CPE2.3:cpe:/a:grafana:grafana:10.3.1"
tag: "NVD-CPE2.3:cpe:/a:kubernetes:ingress-nginx:1.8.4"
tag: "NVD-CPE2.3:cpe:/a:oauth2_proxy_project:oauth2_proxy:7.5.1"
tag: "NVD-CPE2.3:cpe:/a:prometheus:prometheus:2.49.1"
}
}
================================================
FILE: MODULE.bazel
================================================
# Bazel module definition (bzlmod): declares external dependencies,
# toolchains, and pinned container base images for cloud-robotics.

bazel_dep(name = "aspect_bazel_lib", version = "2.21.2")
bazel_dep(name = "bazel_skylib", version = "1.8.1")
bazel_dep(name = "platforms", version = "1.0.0")
bazel_dep(name = "protobuf", version = "29.0", repo_name = "com_google_protobuf")
bazel_dep(name = "rules_oci", version = "2.0.1")
bazel_dep(name = "rules_pkg", version = "1.0.1")
bazel_dep(name = "rules_shell", version = "0.4.1")

# -- bazel_dep definitions -- #

# Repositories that are not available as bzlmod modules; fetched via the
# custom extension in //:non_module_deps.bzl.
non_module_deps = use_extension("//:non_module_deps.bzl", "non_module_deps")
use_repo(non_module_deps, "kubernetes_helm")
use_repo(non_module_deps, "kubernetes_helm3")
use_repo(non_module_deps, "hashicorp_terraform")
use_repo(non_module_deps, "com_github_kubernetes_sigs_application")
use_repo(non_module_deps, "ingress-nginx")
# End of extension `non_module_deps`

#######
# C++ #
#######

bazel_dep(name = "toolchains_llvm", version = "1.7.0")

# Inspect supported toolchains at https://github.com/bazel-contrib/toolchains_llvm/blob/master/toolchain/internal/llvm_distributions.bzl
llvm = use_extension("@toolchains_llvm//toolchain/extensions:llvm.bzl", "llvm")
llvm.toolchain(
    llvm_version = "18.1.4",
)

# Sysroot used for hermetic linux-x86_64 C++ builds.
use_repo(non_module_deps, "com_googleapis_storage_chrome_linux_amd64_sysroot")
llvm.sysroot(
    label = "@com_googleapis_storage_chrome_linux_amd64_sysroot//:all_files",
    targets = ["linux-x86_64"],
)
use_repo(llvm, "llvm_toolchain")

register_toolchains("@llvm_toolchain//:all")

bazel_dep(name = "rules_cc", version = "0.1.5")

######
# Go #
######

bazel_dep(name = "rules_go", version = "0.59.0", repo_name = "io_bazel_rules_go")

go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
go_sdk.download(version = "1.25.4")

bazel_dep(name = "gazelle", version = "0.47.0", repo_name = "bazel_gazelle")

# Go module dependencies are derived from //src:go.mod; only the repos listed
# in use_repo below are visible to BUILD files.
go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
go_deps.from_file(go_mod = "//src:go.mod")
use_repo(
    go_deps,
    "com_github_cenkalti_backoff",
    "com_github_form3tech_oss_jwt_go",
    "com_github_fsnotify_fsnotify",
    "com_github_getlantern_httptest",
    "com_github_golang_glog",
    "com_github_golang_mock",
    "com_github_google_go_cmp",
    "com_github_google_nftables",
    "com_github_googlecloudrobotics_ilog",
    "com_github_jaypipes_ghw",
    "com_github_jaypipes_pcidb",
    "com_github_motemen_go_loghttp",
    "com_github_onsi_gomega",
    "com_github_pkg_errors",
    "com_github_prometheus_client_golang",
    "com_github_spf13_cobra",
    "com_github_spf13_pflag",
    "com_google_cloud_go_compute_metadata",
    "com_google_cloud_go_storage",
    "in_gopkg_h2non_gock_v1",
    "io_k8s_api",
    "io_k8s_apiextensions_apiserver",
    "io_k8s_apimachinery",
    "io_k8s_cli_runtime",
    "io_k8s_client_go",
    "io_k8s_helm",
    "io_k8s_klog",
    "io_k8s_klog_v2",
    "io_k8s_sigs_controller_runtime",
    "io_k8s_sigs_kind",
    "io_k8s_sigs_yaml",
    "io_opencensus_go",
    "io_opencensus_go_contrib_exporter_prometheus",
    "io_opencensus_go_contrib_exporter_stackdriver",
    "org_golang_google_api",
    "org_golang_google_grpc",
    "org_golang_google_protobuf",
    "org_golang_x_crypto",
    "org_golang_x_net",
    "org_golang_x_oauth2",
    "org_golang_x_sync",
)

#######
# OCI #
#######

# Base images are pinned by digest for reproducible container builds.
oci = use_extension("@rules_oci//oci:extensions.bzl", "oci")

# gcloud container images describe gcr.io/distroless/base:latest --format='value(image_summary.digest)'
oci.pull(
    name = "distroless_base",
    digest = "sha256:b31a6e02605827e77b7ebb82a0ac9669ec51091edd62c2c076175e05556f4ab9",
    image = "gcr.io/distroless/base",
    platforms = ["linux/amd64"],
)

# gcloud container images describe gcr.io/distroless/cc:latest --format='value(image_summary.digest)'
oci.pull(
    name = "distroless_cc",
    digest = "sha256:8aad707f96620ee89e27febef51b01c6ff244277a3560fcfcfbe68633ef09193",
    image = "gcr.io/distroless/cc",
    platforms = ["linux/amd64"],
)

oci.pull(
    name = "iptables_base",
    digest = "sha256:656e45c00083359107b1d6ae0411ff3894ba23011a8533e229937a71be84e063",
    image = "gcr.io/google-containers/debian-iptables",
    platforms = ["linux/amd64"],
)

use_repo(
    oci,
    "distroless_base",
    "distroless_base_linux_amd64",
    "distroless_cc",
    "distroless_cc_linux_amd64",
    "iptables_base",
    "iptables_base_linux_amd64",
)
================================================
FILE: README.md
================================================
# Cloud Robotics Core
Google's Cloud Robotics Core is an open source platform that provides
infrastructure essential to building and running robotics solutions for business
automation. Cloud Robotics Core makes managing robot fleets easy for developers,
integrators, and operators. It enables:
* packaging and distribution of applications
* secure, bidirectional robot-cloud communication
* easy access to Google Cloud services such as ML, logging, and monitoring.

Cloud Robotics Core is open source and pre-alpha. Support is currently limited
to a small set of early access partners. We will gladly accept contributions
and feedback, but we are making no stability or support guarantees at this
point in time.
# Documentation
Documentation of the platform and related How-to guides can be found at: https://googlecloudrobotics.github.io/core/
# Get Involved
If you want to get involved, please refer to [CONTRIBUTING.md](CONTRIBUTING.md),
reach out to [cloud-robotics-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/cloud-robotics-discuss)
or ask Stack Overflow questions with [#google-cloud-robotics](https://stackoverflow.com/questions/tagged/google-cloud-robotics).
# Source Code
Most interesting bits are under `src`:
* app_charts: contains kubernetes resources for the core platform and apps
* bootstrap: provisioning for the cloud (terraform) and the robot (debian package)
* go/: the code that goes into images referenced from `app_charts`
The root directory contains a `deploy.sh` script for building and installing the software. More
details on that are in the [building from sources](docs/how-to/deploy-from-sources.md) guide.
================================================
FILE: bazel/BUILD.bazel
================================================
# Make these rule definition files loadable from other packages.
exports_files([
    "app.bzl",
    "app_chart.bzl",
    "container_push.bzl",
    "repositories.bzl",
])

# Execution platform for remote build execution: Linux x86_64 with clang,
# running in the pinned executor container image below.
platform(
    name = "linux_x86_64",
    constraint_values = [
        "@platforms//os:linux",
        "@platforms//cpu:x86_64",
        "@bazel_tools//tools/cpp:clang",
    ],
    exec_properties = {
        "container-image": "docker://gcr.io/cloud-robotics-releases/bazel-rbe-executor@sha256:3ee043e7a322caaff8c9edaa302373deb80e67ad6e42ae35d34b8f3597b8995e",
        "OSFamily": "Linux",
    },
    parents = ["@local_config_platform//:host"],
)
================================================
FILE: bazel/BUILD.sysroot
================================================
# Filegroup exposing the subset of the Chrome Linux amd64 sysroot needed by
# the hermetic LLVM toolchain: the dynamic loader, core C runtime libraries,
# system headers, and the static/shared libraries required for linking.
filegroup(
    name = "all_files",
    srcs = glob(
        [
            "lib/x86_64-linux-gnu/ld*",
            "lib/x86_64-linux-gnu/libc*",
            "lib/x86_64-linux-gnu/libdl*",
            "lib/x86_64-linux-gnu/libgcc*",
            "lib/x86_64-linux-gnu/libm*",
            "lib/x86_64-linux-gnu/libpthread*",
            "lib/x86_64-linux-gnu/librt*",
            "lib/x86_64-linux-gnu/libutil*",
            "lib64/**",
            "usr/include/*.h",
            "usr/include/arpa/**",
            "usr/include/asm-generic/**",
            "usr/include/c++/**",
            "usr/include/linux/**",
            "usr/include/net/**",
            "usr/include/netinet/**",
            "usr/include/rpc/**",
            "usr/include/sys/**",
            "usr/include/x86_64-linux-gnu/**",
            "usr/lib/gcc/**",
            "usr/lib/x86_64-linux-gnu/*crt*.o",
            "usr/lib/x86_64-linux-gnu/libc_nonshared.a",
            "usr/lib/x86_64-linux-gnu/libc.a",
            "usr/lib/x86_64-linux-gnu/libc.so",
            "usr/lib/x86_64-linux-gnu/libdl*",
            "usr/lib/x86_64-linux-gnu/libm*",
            "usr/lib/x86_64-linux-gnu/libpthread*",
            "usr/lib/x86_64-linux-gnu/libresolv.so",
            "usr/lib/x86_64-linux-gnu/librt*",
            "usr/lib/x86_64-linux-gnu/libutil*",
        ],
    ),
    visibility = ["//visibility:public"],
)
================================================
FILE: bazel/app.bzl
================================================
load("//bazel/build_rules/app_chart:run_parallel.bzl", "run_parallel")
def app(name, charts, visibility = None):
    """Macro for a standard Cloud Robotics app.

    This macro establishes two subrules for app name "foo":
    - :foo.push pushes the Docker images for the app.
    - :foo.yaml is a YAML file with the app CR that you need to push to
      Kubernetes. Use k8s_object to push it, or compile it into a Helm chart.

    Args:
        name: string. Name of the app.
        charts: list of targets. Helm charts for this app.
        visibility: Visibility.
    """
    # Resolve the chart labels relative to the calling package.
    pkg = Label("{}//{}".format(native.repository_name(), native.package_name()))
    chart_labels = [pkg.relative(c) for c in charts]
    # One script that invokes every chart's .push target in parallel.
    run_parallel(
        name = name + ".push",
        targets = ["//{}:{}.push".format(c.package, c.name) for c in chart_labels],
        visibility = visibility,
    )
    native.genrule(
        # we name this differently than the file we produce to silence:
        # target 'xxx.yaml' is both a rule and a file; please choose another name for the rule
        name = name + ".manifest",
        srcs = [
            "//{}:{}.snippet-yaml".format(c.package, c.name)
            for c in chart_labels
        ],
        outs = [name + ".yaml"],
        # NOTE(review): this cmd looks corrupted (it references an undefined
        # "chart" variable and the heredoc is malformed) — most likely mangled
        # during extraction. Confirm against the upstream source before use.
        cmd = """cat - $(SRCS) > $@ < {} < $@
{target}:
inline: $$(base64 -w 0 $<)
EOF
""".format(name = name, target = chart),
    )
================================================
FILE: bazel/build_rules/app_chart/BUILD.bazel
================================================
# Expose the script templates and default values files so the app_chart
# rules in other packages can reference them.
exports_files([
    "cache_gcr_credentials.sh.tpl",
    "Chart.yaml.template",
    "push_all.sh.tpl",
    "run_parallel.sh.tpl",
    "values-cloud.yaml",
    "values-robot.yaml",
])
================================================
FILE: bazel/build_rules/app_chart/Chart.yaml.template
================================================
apiVersion: v1
name: ${name}
version: ${version}
# Linter expects an icon.
icon: https://google.com/icon.png
================================================
FILE: bazel/build_rules/app_chart/cache_gcr_credentials.bzl
================================================
def _get_runfile_path(ctx, f):
    """Returns the path of f relative to the ${RUNFILES} root."""
    prefix = "${RUNFILES}/"
    if ctx.workspace_name:
        prefix += ctx.workspace_name + "/"
    return prefix + f.short_path
def _impl(ctx):
    """Renders the wrapper script that caches GCR credentials, then runs target."""
    exe = ctx.attr.target.files_to_run.executable
    # The wrapper needs the template's runfiles plus everything the
    # wrapped target needs at run time.
    runfiles = ctx.attr._sh_tpl.default_runfiles.files.to_list()
    runfiles.append(exe)
    runfiles.extend(ctx.attr.target.default_runfiles.files.to_list())
    # Forward the runfiles root to the wrapped binary via the environment.
    env_prefix = "PYTHON_RUNFILES=\"${RUNFILES}\" "
    ctx.actions.expand_template(
        template = ctx.file._sh_tpl,
        substitutions = {
            "%{gcr_registry}": ctx.attr.gcr_registry,
            "%{command}": env_prefix + _get_runfile_path(ctx, exe),
        },
        output = ctx.outputs.executable,
        is_executable = True,
    )
    return [DefaultInfo(runfiles = ctx.runfiles(files = runfiles))]
cache_gcr_credentials = rule(
    attrs = {
        # The executable target to wrap.
        "target": attr.label(
            mandatory = True,
        ),
        "gcr_registry": attr.string(
            default = "gcr.io",
            doc = "If set, credentials for this GCR registry's domain will be precached",
        ),
        # Shell template rendered into the wrapper script.
        "_sh_tpl": attr.label(
            default = Label("//bazel/build_rules/app_chart:cache_gcr_credentials.sh.tpl"),
            allow_single_file = True,
        ),
    },
    executable = True,
    implementation = _impl,
)

"""Cache gcr credentials before running a command.

This rule executes docker-credential-gcloud before running the command, and
replaces the binary with a helper that is safe for concurrent execution. Works
around https://github.com/bazelbuild/rules_docker/issues/511.

Args:
    target: A target that can be run with "bazel run".
    gcr_registry: string. A GCR Docker registry (gcr.io/myproject).
"""
================================================
FILE: bazel/build_rules/app_chart/cache_gcr_credentials.sh.tpl
================================================
#!/usr/bin/env bash
# Wrapper that caches GCR credentials before executing %{command}.
# Generated from cache_gcr_credentials.sh.tpl by the cache_gcr_credentials rule.
set -eu

# Locate the runfiles tree next to this script when PYTHON_RUNFILES is unset.
function guess_runfiles() {
    pushd ${BASH_SOURCE[0]}.runfiles > /dev/null 2>&1
    pwd
    popd > /dev/null 2>&1
}
RUNFILES="${PYTHON_RUNFILES:-$(guess_runfiles)}"

# app() uses run_parallel() to push images to GCR, which relies on
# gcloud to get credentials. That, however, has a race condition:
# https://github.com/google/containerregistry/issues/115
# As such, we cache credentials and create a script that prints them to
# replace the racy credential helper. This script is added to the start of
# PATH. This is harmless if run_parallel() is being used for something else.
# It also saves ~5s of CPU time.
tmp_bin=$(mktemp --tmpdir= -d deploy-XXXXXXXX-bin)
export PATH="${tmp_bin}:${PATH}"
# Remove the temporary helper directory when the script exits.
function rm_tmp_bin {
    rm -r "${tmp_bin}"
}
trap rm_tmp_bin EXIT
credential_script=$tmp_bin/docker-credential-gcloud
credential_file=$tmp_bin/docker-credential-gcloud.json
# Strip any path suffix: "gcr.io/myproject" -> "gcr.io".
gcp_registry=$(echo "%{gcr_registry}" | cut -d'/' -f 1)
# Fetch credentials once; the replacement helper below only prints the
# cached file, which is safe under concurrent invocations.
docker-credential-gcloud get <<<"https://${gcp_registry}" > "${credential_file}"
cat > "${credential_script}" << EOF
#!/bin/bash
cat "${credential_file}"
EOF
chmod +x "${credential_script}"
# Run the wrapped command with all original arguments.
%{command} "$@"
================================================
FILE: bazel/build_rules/app_chart/push_all.bzl
================================================
load("//bazel:container_push.bzl", "container_push")
def _get_runfile_path(ctx, f):
    """Returns f's path relative to the ${RUNFILES} directory."""
    workspace = ctx.workspace_name
    if workspace:
        return "${RUNFILES}/%s/%s" % (workspace, f.short_path)
    return "${RUNFILES}/" + f.short_path
def _impl(ctx):
    # Renders push_all.sh.tpl into an executable script that pushes every
    # image in parallel via the generated "async" command lines.
    runfiles = ctx.attr._sh_tpl.default_runfiles.files.to_list()
    for target in ctx.attr.push_targets:
        runfiles.append(target.files_to_run.executable)
        runfiles.extend(target.default_runfiles.files.to_list())
    ctx.actions.expand_template(
        template = ctx.file._sh_tpl,
        substitutions = {
            "%{commands}": "\n".join(
                [
                    # Guard line: pushing requires the TAG env variable.
                    "if [[ -z \"${TAG:-}\" ]]; then echo >&2 \"$0: TAG environment variable must be set when pushing images.\"; exit 1; fi",
                ] + [
                    # One async push per image; the registry comes from the
                    # script's first argument (CONTAINER_REGISTRY).
                    "async {command} --repository=\"${{CONTAINER_REGISTRY}}/{repository}\" --tag=\"${{TAG}}\"".format(
                        command = _get_runfile_path(ctx, target.files_to_run.executable),
                        repository = repository,
                    )
                    # images' keys (repository names) pair 1:1 with
                    # push_targets, which the push_all macro builds in order.
                    for target, repository in zip(ctx.attr.push_targets, ctx.attr.images.keys())
                ],
            ),
        },
        output = ctx.outputs.executable,
        is_executable = True,
    )
    return [DefaultInfo(runfiles = ctx.runfiles(files = runfiles))]
_push_all = rule(
    attrs = {
        # Implicit dependencies.
        # Push executables generated by the push_all macro, one per image.
        "push_targets": attr.label_list(
            allow_files = True,
        ),
        # Maps repository name -> image; keys pair with push_targets by order.
        "images": attr.string_dict(
            default = {},
        ),
        # Shell template rendered into the combined push script.
        "_sh_tpl": attr.label(
            default = Label("//bazel/build_rules/app_chart:push_all.sh.tpl"),
            allow_single_file = True,
        ),
    },
    executable = True,
    implementation = _impl,
)
def push_all(name, images = {}, **kwargs):
    """Creates a script that pushes several container images to a registry.

    The registry is passed as a parameter when the generated script is run.

    Args:
        name: string. Name of the generated target.
        images: dict. Repository names as keys and images to be pushed as values.
        **kwargs: forwarded to the underlying rule.
    """
    if "push_targets" in kwargs:
        fail("reserved for internal use by push_all macro", attr = "push_targets")
    images = images or {}
    targets = []
    for repository in images:
        target = "{}.{}.push".format(name, repository)
        container_push(
            name = target,
            image = images[repository],
        )
        targets.append(target)
    _push_all(name = name, images = images, push_targets = targets, **kwargs)
================================================
FILE: bazel/build_rules/app_chart/push_all.sh.tpl
================================================
#!/usr/bin/env bash
# Pushes a set of container images to the registry passed as $1.
# Generated from push_all.sh.tpl; %{commands} is replaced with one
# "async <push-binary> ..." line per image by the _push_all rule.
set -eu

if [[ "$#" -lt 1 ]]; then
    # NOTE(review): the usage message appears to be missing the argument
    # placeholder (likely "<registry>", possibly lost during extraction) —
    # confirm against the upstream source.
    echo "Usage: $0 "
    exit 1
fi
CONTAINER_REGISTRY="$1"

# Locate the runfiles tree next to this script when PYTHON_RUNFILES is unset.
function guess_runfiles() {
    pushd ${BASH_SOURCE[0]}.runfiles > /dev/null 2>&1
    pwd
    popd > /dev/null 2>&1
}
RUNFILES="${PYTHON_RUNFILES:-$(guess_runfiles)}"

PIDS=()
function async() {
    # Launch the command asynchronously and track its process id.
    PYTHON_RUNFILES=${RUNFILES} "$@" &
    PIDS+=($!)
}

%{commands}

if [[ "${#PIDS[@]}" = 0 ]]; then
    # It is valid to generate this script without pushing any images.
    # Bash before v4.4 considers an empty array an unbound variable and would
    # choke on the for-loop below.
    exit 0
fi

# Wait for all of the subprocesses, failing the script if any of them failed.
exitcode=0
for pid in "${PIDS[@]}"; do
    wait ${pid} || exitcode=$?
done
exit $exitcode
================================================
FILE: bazel/build_rules/app_chart/run_parallel.bzl
================================================
def _get_runfile_path(ctx, f):
    """Returns the runfiles-relative path of f, rooted at ${RUNFILES}."""
    if not ctx.workspace_name:
        return "${RUNFILES}/" + f.short_path
    return "${RUNFILES}/" + ctx.workspace_name + "/" + f.short_path
def _impl(ctx):
    """Expands the template into a script that launches every target via async."""
    runfiles = ctx.attr._sh_tpl.default_runfiles.files.to_list()
    command_lines = []
    for target in ctx.attr.targets:
        exe = target.files_to_run.executable
        # Each target's executable and its runfiles must be available when
        # the generated script runs.
        runfiles.append(exe)
        runfiles.extend(target.default_runfiles.files.to_list())
        command_lines.append("async \"%s\" \"$@\"" % _get_runfile_path(ctx, exe))
    ctx.actions.expand_template(
        template = ctx.file._sh_tpl,
        substitutions = {"%{commands}": "\n".join(command_lines)},
        output = ctx.outputs.executable,
        is_executable = True,
    )
    return [DefaultInfo(runfiles = ctx.runfiles(files = runfiles))]
run_parallel = rule(
    attrs = {
        # The executables to run; must contain at least one target.
        "targets": attr.label_list(
            allow_empty = False,
            mandatory = True,
        ),
        # Shell template rendered into the launcher script.
        "_sh_tpl": attr.label(
            default = Label("//bazel/build_rules/app_chart:run_parallel.sh.tpl"),
            allow_single_file = True,
        ),
    },
    executable = True,
    implementation = _impl,
)

"""Run multiple targets in parallel.

This rule builds a "bazel run" target that runs a series of subtargets in
parallel. If a subtarget has errors, execution results in an error when all
subtargets have completed.

Args:
    targets: A list of targets that can be run with "bazel run".
"""
================================================
FILE: bazel/build_rules/app_chart/run_parallel.sh.tpl
================================================
#!/usr/bin/env bash
# Runs several commands in parallel and fails if any of them fails.
# Generated from run_parallel.sh.tpl; %{commands} is replaced with one
# "async ..." line per target by the run_parallel rule.
set -eu

# Locate the runfiles tree next to this script when PYTHON_RUNFILES is unset.
function guess_runfiles() {
    pushd ${BASH_SOURCE[0]}.runfiles > /dev/null 2>&1
    pwd
    popd > /dev/null 2>&1
}
RUNFILES="${PYTHON_RUNFILES:-$(guess_runfiles)}"

PIDS=()
function async() {
    # Launch the command asynchronously and track its process id.
    PYTHON_RUNFILES=${RUNFILES} "$@" &
    PIDS+=($!)
}

%{commands}

# Wait for all of the subprocesses, failing the script if any of them failed.
# PIDS is never empty here: the run_parallel rule requires a non-empty
# "targets" attribute (allow_empty = False).
exitcode=0
for pid in "${PIDS[@]}"; do
    wait ${pid} || exitcode=$?
done
exit $exitcode
================================================
FILE: bazel/build_rules/app_chart/values-cloud.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
deploy_environment: "GCP"
registry: "gcr.io/my-gcp-project"
robots: []
region: example-gcp-region
# Token Vendor feature flags
use_tv_k8s_verbose: false
================================================
FILE: bazel/build_rules/app_chart/values-robot.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
deploy_environment: "GCP"
registry: "gcr.io/my-gcp-project"
robot:
name: ""
================================================
FILE: bazel/build_rules/copy.bzl
================================================
"""Macros to help rearrange files."""
def copy_files(name, srcs, outdir, visibility = None):
    """Creates copies of files within a directory.

    For each source file, a copy with the same basename is created in outdir.
    A filegroup target named `name` is created containing the new files.

    Args:
        name: string. Name of the resulting filegroup target.
        srcs: list of files.
        outdir: output directory.
        visibility: handed through to the targets.
    """
    outs = []
    seen = {}  # basename -> source that claimed it, for clash detection.
    for src in srcs:
        filename = src.split("/")[-1]
        # Two sources with the same basename would generate clashing rule
        # names and outputs; fail early with a clear message instead of
        # letting Bazel report a confusing duplicate-target error.
        if filename in seen:
            fail("copy_files: duplicate basename '%s' from '%s' and '%s'" %
                 (filename, seen[filename], src), attr = "srcs")
        seen[filename] = src
        out = "%s/%s" % (outdir, filename)
        native.genrule(
            name = "%s_%s" % (name, filename),
            srcs = [src],
            outs = [out],
            cmd = "cp $< $@",
            visibility = visibility,
        )
        outs.append(out)
    native.filegroup(
        name = name,
        srcs = outs,
        visibility = visibility,
    )
================================================
FILE: bazel/build_rules/helm_chart.bzl
================================================
def helm_chart(ctx, name, chart, files, templates, values, version, helm, out):
    """Starlark function that builds a helm chart.

    Args:
        ctx: rule context; used to register the packaging shell action.
        name: string. Must match the name in Chart.yaml.
        chart: file. The Chart.yaml file.
        files: list of non-template files to put in files/.
        templates: list of template files.
        values: file. The values.yaml file.
        version: string. Overwrites any version in Chart.yaml.
        helm: file. The Helm tool.
        out: file. The file that the chart is built to.
    """
    # Assemble the canonical chart directory layout in the action sandbox.
    cmd = """
mkdir {name} {name}/templates
cp {chart} {name}/Chart.yaml
cp {values} {name}/values.yaml
""".format(name = name, chart = chart.path, values = values.path)
    if templates:
        template_files = " ".join([t.path for t in templates])

        # Use a single cp invocation to detect filename clashes.
        cmd += "cp {templates} {name}/templates\n".format(name = name, templates = template_files)
    if files:
        cmd += "mkdir {name}/files\n".format(name = name)
        files_locations = " ".join([f.path for f in files])

        # Use a single cp invocation to detect filename clashes.
        cmd += "cp {files} {name}/files\n".format(name = name, files = files_locations)
    # Lint, package, then move the tarball to the declared output location.
    cmd += """
# Linter is too noisy, swallow its output when not failing
{helm} lint --strict {name} >/dev/null 2>&1 || \\
{helm} lint --strict {name}
{helm} package \\
--save=false --version={version} {name} \\
| (grep -v "Successfully packaged" || true)
mv $(basename {output}) {output}
rm -rf {name}""".format(name = name, version = version, helm = helm.path, output = out.path)
    ctx.actions.run_shell(
        inputs = [chart, values] + templates + (files or []),
        tools = [helm],
        outputs = [out],
        command = cmd,
        toolchain = None,
    )
================================================
FILE: bazel/build_rules/helm_template.bzl
================================================
def helm_template(name, release_name, chart, values, namespace = None, helm_version = 2):
    """Locally expand a helm chart into a single YAML file.

    Args:
        name: string. Name of the generated genrule; the output is "<name>.yaml".
        release_name: string. Helm release name used during expansion.
        chart: build label, referencing the chart to expand.
        values: label. File with expand-time values.
        namespace: string. Kubernetes namespace; defaults to "default".
        helm_version: int. Major Helm version to use; must be 2 or 3.
    """
    # Helm 2 and 3 take the release name differently (--name flag vs.
    # positional argument), so build the command per version.
    if helm_version == 2:
        tool = "@kubernetes_helm//:helm"
        cmd = "$(location {tool}) template --name {name} --namespace {namespace} --values $(location {values}) $(location {chart}) > $@".format(name = release_name, namespace = namespace or "default", chart = chart, tool = tool, values = values)
    elif helm_version == 3:
        tool = "@kubernetes_helm3//:helm"
        cmd = "$(location {tool}) template {name} $(location {chart}) --namespace {namespace} --values $(location {values}) > $@".format(name = release_name, namespace = namespace or "default", chart = chart, tool = tool, values = values)
    else:
        # Single formatted message: the original passed a second positional
        # argument to fail(), which is interpreted as an attribute name
        # rather than part of the message.
        fail("Unsupported helm version. Expected 2 or 3, got {}".format(helm_version))
    native.genrule(
        name = name,
        srcs = [chart, values],
        outs = [name + ".yaml"],
        cmd = cmd,
        tools = [tool],
    )
================================================
FILE: bazel/container_push.bzl
================================================
load("@rules_oci//oci:defs.bzl", "oci_push")
def container_push(*args, **kwargs):
    """Creates a script to push a container image to a Docker registry. The
    target name must be specified when invoking the push script."""
    if "repository" in kwargs:
        fail(
            "Cannot set 'repository' attribute on container_push",
            attr = "repository",
        )
    # The real repository is supplied at invocation time, so the attribute
    # is filled with a placeholder value.
    oci_push(repository = "IGNORE", *args, **kwargs)
================================================
FILE: bazel/debug_repository.bzl
================================================
"""Debug util for repository definitions."""
def debug_repository(repo, *fields):
    """Prints selected fields of a repository defined in the WORKSPACE.

    Helps identify which version of a repository has been defined. Example:

        # at the bottom of the WORKSPACE file
        load("//bazel:debug_repository.bzl", "debug_repository")
        debug_repository("org_golang_x_net")

    If needed, override the printed fields by passing extra parameters:

        debug_repository("io_grpc_grpc_java", "patches", "urls")
    """
    if not fields:
        fields = ["branch", "commit", "tag", "url", "urls"]
    attrs = native.existing_rule(repo)
    if attrs == None:
        print(repo, "not found")
        return
    for field in fields:
        if field in attrs and len(attrs[field]) > 0:
            print(repo, field, attrs[field])
================================================
FILE: config.sh.tmpl
================================================
#!/usr/bin/env bash
### Required settings ###
# Project ID of your Cloud Robotics GCP project. This project can be created
# for you as part of the Terraform setup, or it can be created and configured
# manually, then imported with `deploy.sh set-project` or `terraform import`.
GCP_PROJECT_ID=my-project
# GCP region and zone where resources should be created.
GCP_REGION=europe-west1
GCP_ZONE=europe-west1-c
### Optional settings ###
# The Docker registry all Cloud Robotics images are deployed to when installing
# from sources. It is ignored during binary installs.
# If unset, defaults to "gcr.io/${GCP_PROJECT_ID}"
#CLOUD_ROBOTICS_CONTAINER_REGISTRY=gcr.io/my-project
# A space-separated list of GCP alphanumeric project IDs for private image
# repositories. The installer will provision GCR access to these projects,
# both for the gke-node service account and for the robot service account.
#PRIVATE_DOCKER_PROJECTS="my-project my-other-project"
# A Google Group that should be a co-owner of the created GCP project.
#CLOUD_ROBOTICS_SHARED_OWNER_GROUP=my-group@googlegroups.com
# If you want to store your Terraform state in a GCS bucket, give a bucket name
# and a subdirectory of the bucket here. See
# https://www.terraform.io/docs/backends/types/gcs.html for docs.
#TERRAFORM_GCS_BUCKET="my-gcs-bucket"
#TERRAFORM_GCS_PREFIX="my/sub/directory"
# Symmetric cookie encryption key for the oauth2-proxy. Generate with:
# python -c 'import os,base64; print base64.urlsafe_b64encode(os.urandom(16))'
#CLOUD_ROBOTICS_COOKIE_SECRET=A_CyACoujODhfn2yDMy5tw==
# Oauth2 client ID and client secret from
# https://console.cloud.google.com/apis/credentials. If you leave these empty,
# you won't be able to log in with a browser (but CLI access will work fine).
#CLOUD_ROBOTICS_OAUTH2_CLIENT_ID=....apps.googleusercontent.com
#CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET=...
# Domain to be used for the ingress
# If unset, defaults to "www.endpoints.${GCP_PROJECT_ID}.cloud.goog"
#CLOUD_ROBOTICS_DOMAIN=www.example.com
# Enable google cloud robotics layer 2
APP_MANAGEMENT=true
# Enable google cloud robotics layer 1
ONPREM_FEDERATION=true
# Disable the secret manager integration by default
GKE_SECRET_MANAGER_PLUGIN=false
================================================
FILE: current_versions.txt
================================================
{
"cert-manager": "1.16.3",
"ingress-nginx": "1.8.4",
"oauth2-proxy": "7.5.1",
"stackdriver-logging-agent": "1.9.5"
}
================================================
FILE: deploy.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Manage a deployment
# Resolve the directory containing this script so relative paths work
# regardless of the caller's working directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

source "${DIR}/scripts/common.sh"
source "${DIR}/scripts/config.sh"
source "${DIR}/scripts/include-config.sh"

set -o pipefail -o errexit

PROJECT_NAME="cloud-robotics"
# Bazel's runfiles directory name for the main repository.
RUNFILES_ROOT="_main"

if is_source_install; then
  # Not using bazel run to not clobber the bazel-bin dir
  TERRAFORM="${DIR}/bazel-out/../../../external/+non_module_deps+hashicorp_terraform/terraform"
  HELM_COMMAND="${DIR}/bazel-out/../../../external/+non_module_deps+kubernetes_helm/helm"
  SYNK_COMMAND="${DIR}/bazel-bin/src/go/cmd/synk/synk_/synk"
else
  # Binary install: the tools ship with the release under bin/.
  TERRAFORM="${DIR}/bin/terraform"
  HELM_COMMAND="${DIR}/bin/helm"
  SYNK_COMMAND="${DIR}/bin/synk"
fi

TERRAFORM_DIR="${DIR}/src/bootstrap/cloud/terraform"
# Extra flags for "terraform apply"; overridable, defaults to
# non-interactive approval.
TERRAFORM_APPLY_FLAGS=${TERRAFORM_APPLY_FLAGS:- -auto-approve}
# utility functions
function include_config_and_defaults {
  # Loads the per-project config for project "$1" and fills in defaults
  # for optional settings that were left unset.
  include_config "$1"
  CLOUD_ROBOTICS_DOMAIN=${CLOUD_ROBOTICS_DOMAIN:-"www.endpoints.${GCP_PROJECT_ID}.cloud.goog"}
  APP_MANAGEMENT=${APP_MANAGEMENT:-false}
  ONPREM_FEDERATION=${ONPREM_FEDERATION:-true}
  GKE_SECRET_MANAGER_PLUGIN=${GKE_SECRET_MANAGER_PLUGIN:-false}
  # lets-encrypt is used as the default certificate provider for backwards compatibility purposes
  CLOUD_ROBOTICS_CERTIFICATE_PROVIDER=${CLOUD_ROBOTICS_CERTIFICATE_PROVIDER:-lets-encrypt}
  # NOTE(review): the next two defaults are the literal string
  # "GCP_PROJECT_ID", not the value of ${GCP_PROJECT_ID} — confirm whether
  # the "$" was omitted intentionally.
  CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME=${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME:-GCP_PROJECT_ID}
  CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION=${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION:-GCP_PROJECT_ID}
  CLOUD_ROBOTICS_OWNER_EMAIL=${CLOUD_ROBOTICS_OWNER_EMAIL:-$(gcloud config get-value account)}
  # kubectl context name for the main GKE cluster.
  CLOUD_ROBOTICS_CTX=${CLOUD_ROBOTICS_CTX:-"gke_${GCP_PROJECT_ID}_${GCP_ZONE}_${PROJECT_NAME}"}
}
function update_config_var {
  # Updates a single variable in the project's config.sh stored in GCS.
  # Args: $1 = GCP project id, $2 = variable name, $3 = new value.
  # Silently returns if the config file does not exist yet.
  #
  # Declare locals so the generic names (name, value, ...) do not leak
  # into — or clobber — the caller's global scope.
  local cloud_bucket name value config_file
  cloud_bucket="gs://${1}-cloud-robotics-config"
  name="${2}"
  value="${3}"
  config_file="$(mktemp)"
  gcloud storage cp "${cloud_bucket}/config.sh" "${config_file}" 2>/dev/null || return
  save_variable "${config_file}" "${name}" "${value}"
  gcloud storage mv "${config_file}" "${cloud_bucket}/config.sh"
}
function prepare_source_install {
  # Builds the tools, charts, and images needed for a source install and
  # pushes the container images to the configured registry.
  #
  # For whatever reasons different combinations of bazel environment seem to
  # work differently wrt bazel-bin. This hack ensures that both synk and the
  # files that synk will install are in bazel-bin.
  tmpdir="$(mktemp -d)"
  bazel ${BAZEL_FLAGS} build //src/go/cmd/synk
  cp -a ${DIR}/bazel-bin/src/go/cmd/synk/synk_/synk ${tmpdir}/synk
  bazel ${BAZEL_FLAGS} build \
    "@hashicorp_terraform//:terraform" \
    "@kubernetes_helm//:helm" \
    //src/app_charts/base:base-cloud \
    //src/app_charts/platform-apps:platform-apps-cloud \
    //src/app_charts:push \
    //src/bootstrap/cloud:setup-robot.digest \
    //src/go/cmd/setup-robot:setup-robot.push
  # Restore the stashed synk binary; -n avoids overwriting a newer build.
  mkdir -p ${DIR}/bazel-bin/src/go/cmd/synk/synk_/
  mv -n ${tmpdir}/synk ${DIR}/bazel-bin/src/go/cmd/synk/synk_/synk
  rm -f ${tmpdir}/synk
  rmdir ${tmpdir} || /bin/true
  # TODO(rodrigoq): the artifactregistry API would be enabled by Terraform, but
  # that doesn't run until later, as it needs the digest of the setup-robot
  # image. Consider splitting prepare_source_install into source_install_build
  # and source_install_push and using Terraform to enable the API in between.
  gcloud services enable artifactregistry.googleapis.com \
    --project "${GCP_PROJECT_ID}"
  # `setup-robot.push` is the first container push to avoid a GCR bug with parallel pushes on newly
  # created projects (see b/123625511).
  local oldPwd
  oldPwd=$(pwd)
  cd ${DIR}/bazel-bin/src/go/cmd/setup-robot/push_setup-robot.push.sh.runfiles/${RUNFILES_ROOT}
  ${DIR}/bazel-bin/src/go/cmd/setup-robot/push_setup-robot.push.sh \
    --repository="${CLOUD_ROBOTICS_CONTAINER_REGISTRY}/setup-robot" \
    --tag="latest"
  # The tag variable must be called 'TAG', see cloud-robotics/bazel/container_push.bzl
  # Running :push outside the build system shaves ~3 seconds off an incremental build.
  cd ${DIR}/bazel-bin/src/app_charts/push.runfiles/${RUNFILES_ROOT}
  TAG="latest" ${DIR}/bazel-bin/src/app_charts/push "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}"
  cd ${oldPwd}
}
function terraform_exec {
  # Runs terraform in the Terraform config directory. The subshell keeps
  # the caller's working directory unchanged.
  ( cd "${TERRAFORM_DIR}" && ${TERRAFORM} "$@" )
}
function terraform_init {
local ROBOT_IMAGE_DIGEST
ROBOT_IMAGE_DIGEST=$(cat bazel-bin/src/bootstrap/cloud/setup-robot.digest)
# We only need to create dns resources if a custom domain is used.
local CUSTOM_DOMAIN
if [[ "${CLOUD_ROBOTICS_DOMAIN}" != "www.endpoints.${GCP_PROJECT_ID}.cloud.goog" ]]; then
CUSTOM_DOMAIN="${CLOUD_ROBOTICS_DOMAIN}"
fi
# This variable is set by src/bootstrap/cloud/run-install.sh for binary installs
local CRC_VERSION
if [[ -z "${TARGET}" ]]; then
# TODO(ensonic): keep this in sync with the nightly release script
VERSION=${VERSION:-"0.1.0"}
if [[ -d .git ]]; then
SHA=$(git rev-parse --short HEAD)
else
echo "WARNING: no git dir and no \$TARGET env set"
SHA="unknown"
fi
CRC_VERSION="crc-${VERSION}/crc-${VERSION}+${SHA}"
else
CRC_VERSION="${TARGET%.tar.gz}"
fi
cat > "${TERRAFORM_DIR}/terraform.tfvars" <> "${TERRAFORM_DIR}/terraform.tfvars" <> "${TERRAFORM_DIR}/terraform.tfvars" <> "${TERRAFORM_DIR}/terraform.tfvars"
local AR
for AR in "${ADDITIONAL_REGIONS[@]}"; do
local AR_NAME
local AR_REGION
local AR_ZONE
AR_NAME=$(jq -r .name <<<"${AR}")
AR_REGION=$(jq -r .region <<<"${AR}")
AR_ZONE=$(jq -r .zone <<<"${AR}")
cat >> "${TERRAFORM_DIR}/terraform.tfvars" <> "${TERRAFORM_DIR}/terraform.tfvars"
# Docker private projects
if [[ -n "${PRIVATE_DOCKER_PROJECTS:-}" ]]; then
cat >> "${TERRAFORM_DIR}/terraform.tfvars" < "${TERRAFORM_DIR}/backend.tf" </dev/null; do
sleep 1
i=$((i + 1))
if ((i >= 60)) ; then
# Try again, without suppressing stderr this time.
if ! kubectl --context "${CLUSTER_CONTEXT}" get serviceaccount default >/dev/null; then
die "'kubectl get serviceaccount default' failed"
fi
fi
done
local BASE_NAMESPACE
BASE_NAMESPACE="default"
# Remove old unmanaged cert
if ! kubectl --context "${CLUSTER_CONTEXT}" get secrets cluster-authority -o yaml | grep -q "cert-manager.io/certificate-name: selfsigned-ca"; then
kubectl --context "${CLUSTER_CONTEXT}" delete secrets cluster-authority 2> /dev/null || true
fi
# Delete permissive binding if it exists from previous deployments
if kubectl --context "${CLUSTER_CONTEXT}" get clusterrolebinding permissive-binding &>/dev/null; then
kubectl --context "${CLUSTER_CONTEXT}" delete clusterrolebinding permissive-binding
fi
local values
values=(
--set-string "domain=${CLUSTER_DOMAIN}"
--set-string "ingress_ip=${INGRESS_IP}"
--set-string "project=${GCP_PROJECT_ID}"
--set-string "region=${CLUSTER_REGION}"
--set-string "registry=${SOURCE_CONTAINER_REGISTRY}"
--set-string "owner_email=${CLOUD_ROBOTICS_OWNER_EMAIL}"
--set-string "app_management=${APP_MANAGEMENT}"
--set-string "onprem_federation=${ONPREM_FEDERATION}"
--set-string "certificate_provider=${CLOUD_ROBOTICS_CERTIFICATE_PROVIDER}"
--set-string "deploy_environment=${CLOUD_ROBOTICS_DEPLOY_ENVIRONMENT}"
--set-string "oauth2_proxy.client_id=${CLOUD_ROBOTICS_OAUTH2_CLIENT_ID}"
--set-string "oauth2_proxy.client_secret=${CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET}"
--set-string "oauth2_proxy.cookie_secret=${CLOUD_ROBOTICS_COOKIE_SECRET}"
--set "use_tv_verbose=${CRC_USE_TV_VERBOSE}"
)
${SYNK_COMMAND} --context "${CLUSTER_CONTEXT}" init
echo "synk init done"
echo "installing base-cloud to ${CLUSTER_CONTEXT}..."
${HELM_COMMAND} --kube-context "${CLUSTER_CONTEXT}" template -n base-cloud --namespace=${BASE_NAMESPACE} "${values[@]}" \
./bazel-bin/src/app_charts/base/base-cloud-0.0.1.tgz \
| ${SYNK_COMMAND} --context "${CLUSTER_CONTEXT}" apply base-cloud -n ${BASE_NAMESPACE} -f - \
|| die "Synk failed for base-cloud"
# This is the main region. Only run this here!
if [[ "${CLUSTER_NAME}" = "${PROJECT_NAME}" ]]; then
echo "installing platform-apps-cloud to ${CLOUD_ROBOTICS_CTX}..."
${HELM_COMMAND} --kube-context "${CLUSTER_CONTEXT}" template -n platform-apps-cloud "${values[@]}" \
./bazel-bin/src/app_charts/platform-apps/platform-apps-cloud-0.0.1.tgz \
| ${SYNK_COMMAND} --context "${CLUSTER_CONTEXT}" apply platform-apps-cloud -f - \
|| die "Synk failed for platform-apps-cloud"
fi
}
function helm_main_region {
  # Installs the helm charts into the main region's cluster.
  local INGRESS_IP
  # terraform output wraps the value in quotes; strip them.
  INGRESS_IP=$(terraform_exec output ingress-ip | tr -d '"')
  helm_region_shared \
    "${CLOUD_ROBOTICS_CTX}" \
    "${CLOUD_ROBOTICS_DOMAIN}" \
    "${INGRESS_IP}" \
    "${GCP_REGION}" \
    "${GCP_ZONE}" \
    "${PROJECT_NAME}"
}
function helm_additional_region {
  # Installs the helm charts into one additional region's cluster.
  # $1: JSON object describing the region with keys "name", "region", "zone".
  local ar_description
  ar_description="${1}"
  local AR_NAME
  local AR_REGION
  local AR_ZONE
  AR_NAME=$(jq -r .name <<<"${ar_description}")
  AR_REGION=$(jq -r .region <<<"${ar_description}")
  AR_ZONE=$(jq -r .zone <<<"${ar_description}")
  local CLUSTER_NAME
  CLUSTER_NAME="${AR_NAME}-ar-cloud-robotics"
  local INGRESS_IP
  # ingress-ip-ar is a JSON map keyed by cluster name; pick this cluster's IP.
  INGRESS_IP=$(terraform_exec output -json ingress-ip-ar | jq -r ."\"${CLUSTER_NAME}\"")
  helm_region_shared \
    $(gke_context_name "${GCP_PROJECT_ID}" "${CLUSTER_NAME}" "${AR_REGION}" "${AR_ZONE}") \
    "${AR_NAME}.${CLOUD_ROBOTICS_DOMAIN}" \
    "${INGRESS_IP}" \
    "${AR_REGION}" \
    "${AR_ZONE}" \
    "${CLUSTER_NAME}"
}
function helm_charts {
  # Deploy charts to the main region first, then to every additional region.
  helm_main_region
  local region_json
  for region_json in "${ADDITIONAL_REGIONS[@]}"; do
    helm_additional_region "${region_json}"
  done
}
# commands
function set_config {
  # Delegates to the interactive configuration script for the given project.
  local project_id="$1"
  # Quote the script path so a checkout directory containing spaces works.
  "${DIR}/scripts/set-config.sh" "${project_id}"
}
function create {
  # Full deployment: build and push artifacts (source installs only),
  # apply the Terraform configs, then install the helm charts.
  # Quote "$1" so an empty/whitespace argument is passed through verbatim.
  include_config_and_defaults "$1"
  if is_source_install; then
    prepare_source_install
  fi
  terraform_apply
  helm_charts
}
function delete {
  # Tears down the cloud infrastructure via Terraform.
  # Quote "$1" so an empty/whitespace argument is passed through verbatim.
  include_config_and_defaults "$1"
  if is_source_install; then
    # Source installs fetch the terraform binary through bazel.
    bazel ${BAZEL_FLAGS} build "@hashicorp_terraform//:terraform"
  fi
  terraform_delete
}
# Alias for create.
function update {
  # Quote "$1" so the project id is forwarded verbatim.
  create "$1"
}
# This is a shortcut for skipping Terraform config checks if you know the config has not changed.
function fast_push {
  # Quote "$1" so an empty/whitespace argument is passed through verbatim.
  include_config_and_defaults "$1"
  if is_source_install; then
    prepare_source_install
  fi
  helm_charts
}
# This is a shortcut for skipping building and applying Terraform configs if you know the build has not changed.
# Shortcut that skips building and installing charts: only applies the
# Terraform configs. Use when you know the build has not changed.
# $1: GCP project id.
function update_infra {
  # Quote $1 so an empty or space-containing argument is passed through intact.
  include_config_and_defaults "$1"
  terraform_apply
}
# main: validate the command verb and argument count, then dispatch.
if [[ "$#" -lt 2 ]] || [[ ! "$1" =~ ^(set_config|create|delete|update|fast_push|update_infra)$ ]]; then
  die "Usage: $0 {set_config|create|delete|update|fast_push|update_infra} <project id>"
fi

# Log the invocation (project, script, command), then call arguments verbatim.
log "$2" "$0" "$1"
"$@"
================================================
FILE: docs/.gitignore
================================================
# Files created when running Jekyll locally, following
# https://help.github.com/en/articles/setting-up-your-github-pages-site-locally-with-jekyll
.sass-cache/
_site/
Gemfile
Gemfile.lock
================================================
FILE: docs/_config.yml
================================================
theme: jekyll-theme-slate
================================================
FILE: docs/concepts/app-management.md
================================================
# App Management
The Cloud Robotics Core application management (Layer 2) makes it easy to define and deploy
arbitrary applications across a fleet of cloud and robot clusters. The
[Helm v2 chart format](https://helm.sh/docs/developing_charts/) is used to define an application
at the scope of a single cluster. Additional custom resources tie them together to a
cross-cluster application and define their deployment. It relies on the Cloud Robotics Core
[federation layer](federation.md) to distribute the resources to the right clusters.
## App resource
The App resource defines a Cloud Robotics Core application by simply describing Helm charts for
two classes of clusters: cloud and robots. Charts may be specified inline as base64-encoded Helm
chart files or by referencing a Helm repository. App resources will be deployed to the cloud
cluster.
Example 1: application referencing charts from helm repositories
```yaml
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: App
metadata:
name: ros-v1
spec:
repository: https://my.repo
version: 1.2.1
components:
cloud:
name: ros-cloud
robot:
name: ros-robot
```
Example 2: application using inline base64-encoded charts (development workflow)
```yaml
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: App
metadata:
name: ros-v1
spec:
components:
cloud:
chart:
inline: <base64-encoded chart>
robot:
chart:
inline: <base64-encoded chart>
```
Right now we only have bazel build rules to produce inline charts.
## AppRollout Resource
An AppRollout describes how a defined App should be deployed across a fleet of clusters. It allows
to flexibly select robots which should run an application and to inject fine-grained configuration.
Example 3: AppRollout with different configuration options per target
```yaml
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: AppRollout
metadata:
name: ros-stable
labels:
app.kubernetes.io/name: ros
role: navtest
release: stable
spec:
appName: ros-v1
cloud:
values:
override: foo
robots:
- selector:
matchLabels:
model: mir100
values:
override1: bar
- selector:
matchLabels:
model: mir200
values:
override1: baz
version: v1.2.2 # Chart version override for canarying
```
AppRollouts are deployed into the cloud cluster, where a controller (app-rollout-controller) handles them.
The controller applies the specified selectors and creates or updates the internal
ChartAssignments. A ChartAssignment represents a single instance of a chart that should be
installed into a single cluster. These internal objects describe the task of installing parts of
the application.
The federation layer will sync ChartAssignments to robots as needed. The actual
installation is done by another controller (chart-assignment-controller), this
time running both in the cloud and on the robots. The AppRollout controller will
watch the status updates and consolidate the information into status updates on
the AppRollout.
## Sharing secrets
If you create a Secret in the `default` namespace labelled
`cloudrobotics.com/copy-to-chart-namespaces=true`, it will be copied into all
namespaces created by the chart-assignment-controller. This is useful for
cluster-specific license keys that can be used by applications.
## Opt a pod out of status checking
During rollout, the chart-assignment-controller checks for Pods in the rollout being `Running` or `Completed`.
In some cases, this check is not necessary or might need to be opted out of.
In this case, add a label `cloudrobotics.com/opt-out-error-checking=true` to your pods. Adding this
instructs the chart-assignment-controller to not block the status from reaching `Ready`.
================================================
FILE: docs/concepts/config.md
================================================
# Project configuration
The project configuration that one has entered during the initial setup is
stored with the project in GCS. One can look at the options with the following
command:
```shell
gcloud storage cat gs://${PROJECT_ID}-cloud-robotics-config/config.sh
```
The settings contained in the config file are used by Terraform to set up the
project infrastructure, and used by the cloud and chart-assignment-controller services running
in Kubernetes to configure apps.
The terraform support is encapsulated in deploy.sh that creates a temporary
`terraform.tfvars` file.
To support configuring apps, we pass the settings to app-rollout-controller where they are
provided as additional variables for helm templating. The command below prints
the settings we pass to app-rollout-controller:
```shell
kubectl get deployment app-rollout-controller -o=jsonpath='{.spec.template.spec.containers[0].args[0]}'
```
================================================
FILE: docs/concepts/device_identity.md
================================================
# Device Identity
Device Identity, part of Layer 1, provides an identity for robot clusters and
services to integrate those identities into a cloud based IAM system.
The following components are part of the whole setup:
* Cloud:
* `IAM`: Cloud Identity and Access Management
* `Kubernetes configmaps`: used as a Key Management Service
* `Token Vendor`: token exchange service for OAuth2 service accounts
* `robot service-account`: a GCP IAM service account that has the union of
permissions that applications running on the robot cluster require
* Robot cluster (on-prem or edge):
* `Metadata Server`: provides default credentials + project metadata
* `Setup`: special app used to register the workcell
* `<app>`: any app accessing the cloud
The following chapters explain the flows in more detail. Further information
about the Token Vendor can be found in its
[docs](https://github.com/googlecloudrobotics/core/tree/master/src/go/cmd/token-vendor/README.md)
## Setup
The setup flow is used to register a new robot cluster to a cloud project.

* (1) (Admin-)user runs `Setup`, which generates a RSA key-pair and stores it as
a K8S secret
* (2) `Setup` uploads the public key to `Token Vendor`
* (3) `Token Vendor` stores key in `Kubernetes`
## Authentication
The authentication flow is used to transparently make cloud API calls work for
on-prem robot clusters.

* `<app>` creates an API client without loading any custom key material
* (1) API client library probes `Metadata Server` to get ADCs (Application
Default Credentials)
* (2) `Metadata Server` talks to `Token Vendor` to get an Access Token for the
`robot service-account`
* (3) `Token Vendor` verifies the key the request has been signed with against
the device registry
* (4) `Token Vendor` gets an Access Token for the `robot service-account` from
`IAM`
* `Token Vendor` returns the Access Token through `Metadata Server` to the
`<app>`, which can use it to call Cloud APIs under the scope of the
`robot service-account`
================================================
FILE: docs/concepts/federation.md
================================================
# Federation
Federation, part of Layer 1, is responsible for synchronizing the state between robot and cloud
clusters. Configuration state in Cloud Robotics Core is primarily expressed through [custom
resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
by platform and user applications alike. Our federation system enables other components to use
custom resources locally without needing to be aware of the multi cluster setup and the quality
of the network connection.
## Semantics
A Kubernetes resource is typically divided into a [“spec” and a “status”](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#object-spec-and-status)
section. The `spec` section expresses the intent of the resource, typically authored by a user
or another application along with all its metadata.
The `status` section must generally only be written to by the controller that is responsible for
realizing the specification. Consequently, `spec` and `status` typically each have one distinct
author.
For federating resources, this means that `spec` and `status` of a resource are owned by at most
one cluster respectively (possibly the same one). The cluster owning the spec is also the main
owner of the resource overall and controls its lifecycle, i.e. deletion.
A resource’s spec is always synced from an upstream cluster to a downstream
cluster and its status synced back from downstream to upstream.
All resources of a specific type may either be synchronized to all robots or to exactly one robot.
There is no direct synchronization between robots. However, a robot may create a resource in the
cloud cluster that will be distributed to other robots.
If a resource owned by the upstream cluster has been synchronized to one or more
downstream clusters, it can only be permanently deleted upstream: if deleted
downstream, it will be recreated. If deleted in the upstream cluster, it will be
asynchronously deleted in other clusters that hold a copy of the resource.
Upstream deletion can complete before the downstream resource is deleted.
## cr-syncer
The cr-syncer component (Custom Resource Syncer) is a controller that runs inside each robot
cluster. It is connected to the Kubernetes API servers of the cloud and the robot cluster alike
and continuously watches for updates on custom resources. The controller contains retry and resync
logic to address intermittent connectivity.

The behavior of the cr-syncer can be configured per custom resource definition (CRD) by setting
annotations on its CRD:
* `cr-syncer.cloudrobotics.com/spec-source`: may be `cloud` or `robot`. It determines which
cluster type owns metadata, spec, and lifecycle of all resources of the CRD. It implies
that the other cluster type owns the status section.
* `cr-syncer.cloudrobotics.com/filter-by-robot-name`: a boolean that determines whether resources
will be synced to all robots or just a single one. An individual resource is labeled with
`cloudrobotics.com/robot-name` to indicate which robot it should be synced to. If the label
is missing on a resource, it will not be synced at all.
* `cr-syncer.cloudrobotics.com/status-subtree`: a string key, which defines which sub-section of
the resource status is synced from the downstream cluster. This lets you split
a resource’s status into `robot` and `cloud` sections, for example. Using this
annotation is generally discouraged as it likely points to a flaw in the
modeling of the respective CRD.
## Deletion
When the cr-syncer sees a resource in the downstream cluster with no
corresponding resource in upstream cluster, it deletes it. This handles orphaned
resources when the upstream resource was deleted while the cr-syncer was
restarting. It also means that you can't create a resource directly in the
downstream cluster. The upstream resource is identified using the namespace and
name, but not the UID, so deletion and recreation upstream may result in an
update in the downstream cluster.
> **Note**: if you create a resource directly in the downstream cluster, the
> behavior will depend on how the CRD is annotated. If `filter-by-robot-name`
> is false, the cr-syncer will delete all downstream resources that don't
> correspond to upstream resources. This means that by listing CRs in the
> upstream cluster, you can reason about which CRs will exist in the downstream
> cluster.
>
> If `cr-syncer.cloudrobotics.com/filter-by-robot-name` is true, then the
> cr-syncer will ignore any downstream resources that are not labelled with a
> matching robot name. This means that a robot can run ChartAssignments that are
> synced from the cloud as well as those created directly in the robot cluster.
In some cases, downstream deletion may be blocked. For example, if we have
deleted an upstream ChartAssignment, but the chart-assignment-controller has failed to remove
its finalizer from the downstream ChartAssignment. This edge case leads to
surprising behavior:
- An upstream ChartAssignment can be recreated before the downstream
ChartAssignment is deleted.
- The old status from the downstream cluster will be synced to the new upstream
ChartAssignment.
If needed, this can be detected by watching the downstream cluster after
deleting the resource from the downstream cluster. The situation will clean up
once downstream deletion is complete.
Note: previously, the cr-syncer used finalizers to block upstream deletion
until the downstream resource was deleted. This gave the original deleter more
information: for example, once an AppRollout had been deleted in the cloud, it
means that all robots have terminated the app's pods. However, this caused
problems with offline or renamed clusters: an admin would have to manually clean
up the old finalizers. The new asynchronous behavior is not affected by offline
clusters.
## Resource generations
Custom resources have a field `.metadata.generation` that starts at 1 and is
incremented when the resource changes. Specifically, if the CRD enables the
/status subresource, the generation increases by 1 every time the resource spec
changes, but not when the status changes. The resource controller can set
`.status.observedGeneration` to the latest generation it has observed, so the
user can change the spec, then wait for `observedGeneration` to catch up before
looking at the status. For example:
* Create a Deployment for one pod (generation=1), and wait for the status to be Ready.
* Change the Deployment's image reference (generation=2): the status is still
Ready, but this refers to the old spec (observedGeneration=1).
* Wait for the status to update (observedGeneration=2): now the status is
non-ready, referring to the newer spec.
* Wait for the status to be Ready. The new image is now running.
`generation` and `observedGeneration` can **only be compared in the downstream
cluster**. As the generation is managed by the Kubernetes apiserver, the
cr-syncer cannot guarantee that the upstream generation matches the downstream
generation. On the other hand, `observedGeneration` will be copied from
downstream to upstream with the rest of `.status`. This means that `generation`
is cluster-specific but `observedGeneration` always refers to the downstream
generation.
================================================
FILE: docs/developers/debug-auth.md
================================================
# Debugging authentication problems
Useful tips for working with Authentication and Authorization systems.
## Run a sample request with various credentials
You can call Cloud APIs with curl to see whether authorization works.
### Your own credentials
```bash
PROJECT_NUMBER=201199916163
curl -v -H "Content-Type: application/json" \
-H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
"https://cloudroboticssensordata.googleapis.com/v1eap/projects/${PROJECT_NUMBER}/sensors"
```
### Service account JSON file
You can create a JSON file with the robot account's credentials on
the [Cloud console's credentials page](https://console.cloud.google.com/apis/credentials).
```bash
PROJECT_NUMBER=201199916163
JSON_CREDENTIALS=/tmp/my-project-b7364a68fa92.json
curl -v -H "Content-Type: application/json" \
-H "Authorization: Bearer $(GOOGLE_APPLICATION_CREDENTIALS=${JSON_CREDENTIALS} gcloud auth application-default print-access-token)" \
"https://cloudroboticssensordata.googleapis.com/v1eap/projects/${PROJECT_NUMBER}/sensors"
```
### Get an OAuth token from IAM
The token vendor doesn't have its own keys, but instead calls IAM's
generateAccessToken method. You can emulate its behavior by using the [API
Explorer](https://developers.google.com/apis-explorer/#search/iam%20credentials/iamcredentials/v1/iamcredentials.projects.serviceAccounts.generateAccessToken)
to call `iamcredentials.projects.serviceAccounts.generateAccessToken`. The name
parameter is
`projects/-/serviceAccounts/robot-service@my-project.iam.gserviceaccount.com`,
and the `scope` is `https://www.googleapis.com/auth/cloud-platform`.
You can pass the returned access token in an Authorization header as above.
## Check whether the client's request is well-formed and authenticated
The easiest way to verify that metadata server and the gRPC client library
are doing the right thing is to use a logging HTTP server as the gRPC server.
Instead of setting the gRPC host to the Cloud API server
(`cloudroboticssensordata.googleapis.com`), you set it to an
HTTPS-capable server under your control. You need HTTPS support because
otherwise the gRPC library will rightfully decline to send access tokens.
Luckily, your Cloud Robotics Core setup already runs an HTTPS server. Suppose
you're calling the gRPC service
`google.cloud.robotics.sensordata.v1eap.SensorDataService`. You can
hook a very simple Python HTTP server into your cloud nginx setup like
this:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: debug
spec:
server.py: |
import BaseHTTPServer
import SocketServer
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
print "Got request for ", self.path, " with auth ", self.headers.get('Authorization')
def do_POST(self):
print "Got request for ", self.path, " with auth ", self.headers.get('Authorization')
httpd = SocketServer.TCPServer(("", 8080), MyHandler)
httpd.serve_forever()
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: debug
spec:
selector:
matchLabels:
app: debug
replicas: 1
template:
metadata:
labels:
app: debug
spec:
containers:
- name: python
image: python:2
args: ["python", "/src/server.py"]
volumeMounts:
- name: src-volume
mountPath: /src
volumes:
- name: src-volume
configMap:
name: debug
---
apiVersion: v1
kind: Service
metadata:
labels:
app: debug
name: debug
spec:
ports:
- name: http
port: 8082
protocol: TCP
targetPort: 8080
selector:
app: debug
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/backend-protocol: HTTP
name: debug
spec:
ingressClassName: nginx
rules:
- host: www.endpoints.my-project.cloud.goog
http:
paths:
- path: /google.cloud.robotics.sensordata.v1eap.SensorDataService
pathType: Prefix
backend:
service:
name: debug
port:
number: 8082
```
This will log the Authorization header to the pod's stdout, so you can view it
with `kubectl logs`. Save the token to a file (don't paste it into the command
line because it will end up in your shell history).
## Checking tokens with the tokeninfo service
You can check the token's contents and sanity with Google's tokeninfo endpoint:
```shell
curl https://oauth2.googleapis.com/tokeninfo?access_token=$(cat /tmp/token.txt)
```
================================================
FILE: docs/how-to/connecting-robot.md
================================================
# Connecting a robot to the cloud
Estimated time: 10 min
This page describes how to connect a Kubernetes cluster on a robot running Ubuntu 20.04 to the cloud.
Once you've done this, you can:
* Run a private Docker container from the Google Container Registry
* Securely communicate with cloud services
* See logs from the robot in the Cloud Console
## Setting up the GCP project
1. If you haven't already, complete the [Setting up the GCP project](../quickstart.md) steps.
1. On the computer you used to set up the cloud project, generate an access token, which you'll use to give the robot access to the cloud:
```shell
gcloud auth application-default print-access-token
```
> **Note:** If you want to reduce the risk that your cloud project is
> compromised using this token during its 1h lifetime, you can generate a less
> privileged service account token:
>
> ```
> SA="human-acl@${PROJECT_ID}.iam.gserviceaccount.com"
> gcloud iam service-accounts add-iam-policy-binding "${SA}" \
> --role=roles/iam.serviceAccountTokenCreator \
> --project="${PROJECT_ID}" --member="user:${YOUR_EMAIL_ADDRESS:?}"
> gcloud auth print-access-token --impersonate-service-account="${SA}"
> ```
>
> If you see `ERROR: Failed to impersonate ...`, wait a few minutes for the IAM
> policy to propagate.
>
> You can ignore the "WARNING: This command is using service account
> impersonation."
## Installing the cluster on the robot
## Installing Kubernetes
You'll need to install a Kubernetes cluster on the robot before you can connect it to the cloud. The cluster manages and supports the processes that communicate with the cloud.
Please see external references for setting up k8s. For simplicity we recommend
[k3s](https://k3s.io/) or a single node
[kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/)
cluster (untested).
## Setting up the robot
1. Set up the robot cluster to connect to the cloud. When running `setup_robot.sh`, you'll need to enter the access token you generated earlier. You may find it easiest if you SSH into the robot from the workstation you used to set up the project.
```shell
mkdir -p ~/cloud-robotics-core
cd ~/cloud-robotics-core
curl https://raw.githubusercontent.com/googlecloudrobotics/core/master/src/bootstrap/robot/setup_robot.sh >setup_robot.sh
bash setup_robot.sh my-robot --project ${PROJECT_ID} \
--robot-type my-robot-type
```
Set `${PROJECT_ID}` to your GCP project ID. When prompted for an access token, provide the authentication token you generated earlier.
> **Note:** `my-robot-type` is a placeholder and you can ignore it for now.
## What's next
* [Using Cloud Storage from a robot](using-cloud-storage.md).
================================================
FILE: docs/how-to/creating-declarative-api.md
================================================
# Creating a declarative API
In this guide we will use a Kubernetes-style declarative API to interface to an external Charge Service for a robot.
This API is built around the concept of a ChargeAction resource, which instructs a robot to drive to a charger.
While the robot is charging, the status of the ChargeAction resource is kept up-to-date and can be observed.
## Motivation
RPC-based systems like ROS's [actionlib](http://wiki.ros.org/actionlib), while proven to be scalable, maintainable and useful, leave a few things to be desired:
1. **Synchronization**. The intent for a controller is stored in-memory in multiple components and we rely on correct synchronization.
For example, the motion planner sends the "turn wheel 3 times per second" message to the wheel actuator, then trusts that the wheel actuator will have received the intent and waits for it to act on the shared intent.
If a second process (such as an emergency stop) overwrites the intent of the wheel actuator, there's no standard channel to notify the motion planner.
2. **Persistence**. Since the intent is stored in-memory, it is lost when any process restarts.
This is the core reason that software in ROS systems can't be updated on the fly.
3. **Inspection**. For debugging, a coherent view into the current system intent would be great.
In RPC-based APIs, the intent is often updated differentially (eg "a little more to the left"), so our only hope of debugging is to log all messages ever sent.
In a declarative API, all actions and feedback are stored in a shared database—an approach built on Kubernetes' experience building robust distributed systems—which addresses these issues.
The latency added by going through the shared database means that
declarative APIs are best suited to latency-tolerant applications like
high-level control.
## Prerequisites
* Completed the [Quickstart Guide](../quickstart.md), after which the GCP project is set up and `gcloud-sdk` and `kubectl` are installed and configured.
* `docker` is installed and configured on the workstation ([instructions](https://docs.docker.com/install/linux/docker-ce/ubuntu/)).
Create a directory for the code examples of this guide, e.g.:
```shell
mkdir charge-service
cd charge-service
```
Set your GCP project ID as an environment variable:
```
export PROJECT_ID=[YOUR_GCP_PROJECT_ID]
```
All files created in this tutorial can be found in
[docs/how-to/examples/charge-service](https://github.com/googlecloudrobotics/core/tree/master/docs/how-to/examples/charge-service).
If you download the files, you have to replace the placeholder `[PROJECT_ID]` with your GCP project ID:
```shell
sed -i "s/\[PROJECT_ID\]/$PROJECT_ID/g" charge-controller.yaml
```
## Installing metacontroller
This tutorial is based on [metacontroller](https://metacontroller.github.io/metacontroller/intro.html), an add-on for Kubernetes that makes it easy to write and deploy [custom controllers](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#custom-controllers).
Custom controllers implement the logic behind a declarative API.
First, make sure that `kubectl` points to the correct GKE cluster:
```shell
kubectl config get-contexts
```
If the correct cluster is not marked with an asterisk in the output, you can switch to it with `kubectl config use-context [...]`.
Now install metacontroller to the cloud-cluster:
```shell
kubectl create namespace metacontroller
kubectl apply -f https://raw.githubusercontent.com/metacontroller/metacontroller/master/manifests/production/metacontroller-rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/metacontroller/metacontroller/master/manifests/production/metacontroller-crds-v1.yaml
kubectl apply -f https://raw.githubusercontent.com/metacontroller/metacontroller/master/manifests/production/metacontroller.yaml
```
Let's check that all resources came up:
```console
> kubectl get pods --namespace metacontroller
NAME READY STATUS RESTARTS AGE
metacontroller-0 1/1 Running 0 1m
```
You can learn more details in the metacontroller's [install instructions](https://metacontroller.github.io/metacontroller/guide/install.html).
> **Limitations of metacontroller**:
> Writing custom controllers with metacontroller is easy, and you can use
> whatever programming language you prefer.
> However, it has some limitations.
>
> 1. metacontroller can't directly detect changes in external state, although
> you can configure it to periodically reconcile your resources with external
> systems. This introduces latency corresponding to the reconciliation
> frequency.
> 1. The information available to your controller is limited.
> You can't use metacontroller to create a controller that only acts on a
> single resource out of many (for example, a controller that only executes
> the highest-priority action).
>
> For advanced use cases, writing a controller in Golang offers more
> flexibility. See
> [sample-controller](https://github.com/kubernetes/sample-controller) for an
> example.
## Defining the controller logic
The core of a declarative API is the controller logic, which defines how the resources should be handled and reports the current status.
For the Charge Service, we've implemented the logic in a Python script.
Download [server.py](examples/charge-service/server.py):
```shell
curl -O https://raw.githubusercontent.com/googlecloudrobotics/core/master/docs/how-to/examples/charge-service/server.py
```
This Python program implements a server that listens on port 80 for incoming HTTP POST requests from metacontroller.
The controller logic is contained in the `sync()` method, which handles new ChargeActions by calling `charge_service.start_charging()`, and handles in-progress ChargeActions by updating the status.
[embedmd]:# (examples/charge-service/server.py python /.*state = current_status.get/ /return.*status.*/)
```python
state = current_status.get("state", "CREATED")
if state == "CREATED":
# The ChargeAction has just been created. Use the external Charge Service
# to start charging. Store the request ID in the status so we can use it
# to check the state of the charge request.
request_id = self.charge_service.start_charging()
desired_status["state"] = "IN_PROGRESS"
desired_status["request_id"] = request_id
elif state == "IN_PROGRESS":
try:
# Get the progress of the charge request from the external service.
progress = self.charge_service.get_progress(
current_status["request_id"])
desired_status["charge_level_percent"] = progress
if progress == 100:
# Charging has completed.
desired_status["state"] = "OK"
except ValueError as e:
# The charge request was not found. This could be because the robot was
# restarted during a charge, and the request was forgotten.
desired_status["state"] = "ERROR"
desired_status["message"] = str(e)
elif state in ["OK", "CANCELLED", "ERROR"]:
# Terminal state, do nothing.
pass
else:
desired_status["state"] = "ERROR"
desired_status["message"] = "Unrecognized state: %r" % state
return {"status": desired_status, "children": []}
```
## Dockerizing the service
Next, to prepare our controller logic for deployment in the cloud, we package it as a Docker image. Make sure that the docker daemon is running and that your user has the necessary privileges:
```shell
docker run --rm hello-world
```
If this command fails, make sure Docker is installed according to the [installation instructions](https://docs.docker.com/install/linux/docker-ce/ubuntu/).
In the same directory as `server.py`, create a `Dockerfile` with the following contents:
[embedmd]:# (examples/charge-service/Dockerfile dockerfile)
```dockerfile
FROM python:alpine
WORKDIR /data
COPY server.py ./
CMD [ "python", "-u", "./server.py" ]
```
(Note: the `-u` option disables line-buffering; Python's line-buffering can prevent output from appearing immediately in the Docker logs.)
To build the Docker image, run:
```shell
docker build -t charge-controller .
```
You should see `Successfully tagged charge-controller:latest`. You can run the container locally with:
```shell
docker run -ti --rm -p 8000:8000 charge-controller
```
Then, from another terminal on the same workstation, send a request with an empty `parent` object:
```shell
curl -X POST -d '{"parent": {}, "children": []}' http://localhost:8000/
```
You should see a response like:
```json
{"status": {"state": "IN_PROGRESS", "request_id": "2423e70c-9dc7-47ac-abcb-b2ef0cbc676c"}, "children": []}
```
The response indicates that the controller would set `"state": "IN_PROGRESS"` on a newly-created ChargeAction.
## Uploading the Docker image to the cloud
In order to be able to run the server as a container in our cloud cluster, we need to upload the Docker image to our GCP project's private [container registry](https://cloud.google.com/container-registry/docs/pushing-and-pulling).
Enable the Docker credential helper:
```shell
gcloud auth configure-docker
```
Tag the image and push it to the registry:
```shell
docker tag charge-controller gcr.io/$PROJECT_ID/charge-controller
docker push gcr.io/$PROJECT_ID/charge-controller
```
The image should now show up in the [Container Registry](https://console.cloud.google.com/gcr).
## Deploying the declarative API in the cloud
Create a file called `charge-crd.yaml` with the following contents:
[embedmd]:# (examples/charge-service/charge-crd.yaml yaml)
```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: chargeactions.example.com
annotations:
cr-syncer.cloudrobotics.com/spec-source: cloud
spec:
group: example.com
names:
kind: ChargeAction
plural: chargeactions
singular: chargeaction
scope: Namespaced
versions:
- name: v1
served: true
storage: true
subresources:
status: {}
schema:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:cr-syncer:chartaction
labels:
cr-syncer.cloudrobotics.com/aggregate-to-robot-service: "true"
rules:
- apiGroups:
- example.com
resources:
- chargeactions
verbs:
- get
- list
- watch
- update
```
This is a [custom resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) definition (CRD) for a resource called ChargeAction.
This simple example just describes the name and version of the API, but CRDs can also define schemas for the resources.
The ClusterRole configures [role-based access control](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to let the robot access the ChargeActions.
Don't worry about the `cr-syncer.cloudrobotics.com/spec-source` annotation for now, as it'll be explained later in the tutorial.
Next, create a file called `charge-controller.yaml` with the following contents, replacing `[PROJECT_ID]` with your GCP project ID:
[embedmd]:# (examples/charge-service/charge-controller.yaml yaml)
```yaml
apiVersion: metacontroller.k8s.io/v1alpha1
kind: CompositeController
metadata:
name: charge-controller
spec:
generateSelector: true
parentResource:
apiVersion: example.com/v1
resource: chargeactions
resyncPeriodSeconds: 1
hooks:
sync:
webhook:
url: http://charge-controller.default:8000/sync
---
apiVersion: v1
kind: Service
metadata:
name: charge-controller
spec:
selector:
app: charge-controller
ports:
- port: 8000
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: charge-controller
spec:
replicas: 1
selector:
matchLabels:
app: charge-controller
template:
metadata:
labels:
app: charge-controller
spec:
containers:
- name: controller
image: gcr.io/[PROJECT_ID]/charge-controller
ports:
- containerPort: 8000
```
This file contains the information needed by Kubernetes and metacontroller to handle ChargeAction resources.
In the following, we will go over it bit by bit assuming basic familiarity with the [YAML format](https://en.wikipedia.org/wiki/YAML).
We define three Kubernetes resources:
* The *CompositeController* tells metacontroller to send ChargeAction resources to the charge-controller Service.
* The *Service* defines how the HTTP server is exposed within the cluster.
* The *Deployment* describes the Docker container to run.
Metadata, labels, and selectors are used to tie the three resources together.
A detailed explanation of the Kubernetes resources is out of scope for this guide, check out the [Kubernetes docs](https://kubernetes.io/docs/home/) or [metacontroller User Guide](https://metacontroller.github.io/metacontroller/guide.html) to get started.
There are a few points worth mentioning, though:
* In the Deployment, don't forget to replace `[PROJECT_ID]` with your GCP project ID.
* The CompositeController sets `resyncPeriodSeconds: 1`.
This tells metacontroller to check each ChargeAction every second.
This allows `server.py` to update the progress every second while the action is in progress.
* The CompositeController sets `url: http://charge-controller.default:8000/sync`.
This tells metacontroller that the ChargeAction resources are handled by a service called `charge-controller` in the `default` namespace.
Deploy these resources by applying the configuration:
```shell
kubectl apply -f charge-crd.yaml
kubectl apply -f charge-controller.yaml
```
You can explore the various resources that were created on your cluster as a result of this command in the [GKE Console](https://console.cloud.google.com/kubernetes/workload) or with `kubectl`, e.g.:
```shell
kubectl get pods
```
The resulting list should contain a running pod with a name like `charge-controller-xxxxxxxxxx-xxxxx`.
## Redeploying after a change
If you make a change to `server.py`, you need to rebuild and push the Docker image:
```shell
docker build -t charge-controller .
docker tag charge-controller gcr.io/$PROJECT_ID/charge-controller
docker push gcr.io/$PROJECT_ID/charge-controller
```
The easiest way to get Kubernetes to restart the workload with the latest version of the container is to delete the pod:
```shell
kubectl delete pod -l 'app=charge-controller'
```
Kubernetes will automatically pull the newest image and recreate the pod.
If you make a change to `charge-controller.yaml`, all you have to do is apply it again:
```shell
kubectl apply -f charge-controller.yaml
```
## Accessing the API
You can use `kubectl` to interact with the API.
Create a file called `charge-action.yaml` with the following contents:
[embedmd]:# (examples/charge-service/charge-action.yaml yaml)
```yaml
apiVersion: example.com/v1
kind: ChargeAction
metadata:
name: my-charge-action
```
Run the following command to create a ChargeAction and observe how its status changes:
```shell
kubectl apply -f charge-action.yaml \
&& watch -n0 kubectl describe chargeaction my-charge-action
```
Over the next 10 seconds, you should see the "Charge Level Percent" increase to 100, and then the state should become "CHARGED".
> **Troubleshooting**:
> If the ChargeAction's status doesn't change, check that metacontroller is installed by running `kubectl --namespace metacontroller get pods`.
> You should see `metacontroller-0 1/1 Running`.
> You can also check the metacontroller logs with `kubectl --namespace metacontroller logs metacontroller-0`
## Deploying the declarative API on the robot.
So far, the Charge Service has been running in the cloud, but we need to run
code on the robot to get it to charge.
We can change this with the `cr-syncer`, a component of Cloud Robotics Core that allows declarative APIs to work between Kubernetes clusters.
In particular, we can run the charge-controller on the robot, while creating the ChargeAction in the cloud cluster.
The `cr-syncer` takes care of copying the ChargeAction to the robot when the
robot has network connectivity.
**Prerequisite**: you'll need a robot that has been successfully [connected to the cloud](connecting-robot.md).
First, remove the controller from the cloud cluster:
```shell
# Note: run this on the workstation
kubectl delete -f charge-controller.yaml
```
Then SSH into the robot, install metacontroller, and bring up the charge-controller there:
```shell
# Note: run this on the robot
kubectl create namespace metacontroller
kubectl apply -f https://raw.githubusercontent.com/metacontroller/metacontroller/master/manifests/production/metacontroller-rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/metacontroller/metacontroller/master/manifests/production/metacontroller-crds-v1.yaml
kubectl apply -f https://raw.githubusercontent.com/metacontroller/metacontroller/master/manifests/production/metacontroller.yaml
export PROJECT_ID=[YOUR_GCP_PROJECT_ID]
kubectl apply -f https://raw.githubusercontent.com/googlecloudrobotics/core/master/docs/how-to/examples/charge-service/charge-crd.yaml
curl https://raw.githubusercontent.com/googlecloudrobotics/core/master/docs/how-to/examples/charge-service/charge-controller.yaml \
| sed "s/\[PROJECT_ID\]/$PROJECT_ID/g" | kubectl apply -f -
```
Now, check that these are running correctly:
```console
# Note: run this on the robot
> kubectl get pods --namespace metacontroller
NAME READY STATUS RESTARTS AGE
metacontroller-0 1/1 Running 0 1m
> kubectl get pods -l app=charge-controller
NAME READY STATUS RESTARTS AGE
charge-controller-57786849f8-xp5kf 1/1 Running 0 77s
```
Switch back to a terminal on your workstation.
As before, you can create a ChargeAction with `kubectl`, but this time it will be
handled by the controller on the robot.
```shell
# Note: run this on the workstation
kubectl delete -f charge-action.yaml
kubectl apply -f charge-action.yaml \
&& watch -n0 kubectl describe chargeaction my-charge-action
```
How does this work?
- The `cr-syncer` runs on the robot and watches custom resources in the cloud.
- It sees the `cr-syncer.cloudrobotics.com/spec-source: cloud` annotation on the
CustomResourceDefinition, which tells it to copy the `spec` from
`my-charge-action` in the cloud cluster into a copy of `my-charge-action` in
the robot cluster.
- While the robot is charging, the robot's charge-controller updates the status
in the robot's cluster.
- The `cr-syncer` copies the status back up to the original resource in the
cloud cluster.
## Cleaning up
In order to stop the controller and remove the CRD you created, run:
```shell
kubectl delete -f charge-controller.yaml -f charge-crd.yaml
```
If you want to uninstall metacontroller too, run:
```shell
kubectl delete namespace metacontroller
```
If you installed on the robot, you'll need to run these commands there too.
================================================
FILE: docs/how-to/deploy-from-sources.md
================================================
# Deploy Cloud Robotics Core from sources
Estimated time: 30 min
This page describes how to set up a Google Cloud Platform (GCP) project
containing the Cloud Robotics Core components.
In particular, this creates a cluster with Google Kubernetes Engine and prepares
it to accept connections from robots, which enables those robots to securely
communicate with GCP.
The commands were tested on machines running Debian (Stretch) or Ubuntu (16.04
and 18.04) Linux.
1. In the GCP Console, go to the [Manage resources][resource-manager] page and
select or create a project.
1. Make sure that [billing][modify-project] is enabled for your project.
1. [Install the Cloud SDK][cloud-sdk]. When prompted, choose the project that you created above.
1. After installing the Cloud SDK, install the `kubectl` command-line tool and the GKE auth plugin:
```shell
gcloud components install kubectl
gcloud components install gke-gcloud-auth-plugin
```
If you're using Debian or Ubuntu, you may need to use `apt install kubectl` instead.
1. [Install the Bazel build system][install-bazel].
1. Install additional build dependencies:
```shell
sudo apt-get install default-jdk git python-dev unzip xz-utils
```
[resource-manager]: https://console.cloud.google.com/cloud-resource-manager
[modify-project]: https://cloud.google.com/billing/docs/how-to/modify-project
[cloud-sdk]: https://cloud.google.com/sdk/docs/
[install-bazel]: https://github.com/bazelbuild/bazel/blob/4.0.0/site/docs/install-ubuntu.md
## Build and deploy the project
1. Clone the source repo.
```shell
git clone https://github.com/googlecloudrobotics/core
cd core
```
1. Create application default credentials, which are used to deploy the cloud project and
authorize access to the cloud docker registry.
```shell
gcloud auth application-default login
gcloud auth configure-docker
```
1. Create a Cloud Robotics config in your project:
```shell
./deploy.sh set_config [PROJECT_ID]
```
You can keep the defaults for the other settings by hitting `ENTER`.
This command creates a file `config.sh` containing your choices and stores
it in a cloud-storage bucket named `[PROJECT_ID]-cloud-robotics-config`.
You can verify the settings using:
```shell
gcloud storage cat gs://[PROJECT_ID]-cloud-robotics-config/config.sh
```
1. Build the project. Depending on your computer and internet connection, it may take around 15 minutes.
```shell
bazel build //...
```
1. Deploy the cloud project.
```shell
./deploy.sh create [PROJECT_ID]
```
> **Known issue:**
> Sometimes, this command fails with an error message like
> `Error 403: The caller does not have permission`,
> `Error 403: Service ... not found or permission denied`,
> `Bad status during token exchange: 503`, or
> `Error enabling service`.
> In these cases, wait for a minute and try again.
`deploy.sh` created a Kubernetes cluster using Google Kubernetes Engine and used Helm to install the Cloud Robotics Core components.
You can browse these components on the [Workloads dashboard](https://console.cloud.google.com/kubernetes/workload).
Alternatively, you can list them from the console on your workstation:
```console
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
cert-manager-xxx 1/1 Running 0 1m
nginx-ingress-xxx 1/1 Running 0 1m
oauth2-proxy-xxx 0/1 CrashLoopBackOff 4 1m
token-vendor-xxx 1/1 Running 0 1m
```
> **Note** Unless you already set up OAuth, the `oauth2-proxy` will show an error which we will ignore for now.
In addition to the cluster, `deploy.sh` also created:
* the [[PROJECT_ID]-robot Cloud Storage bucket](https://console.cloud.google.com/storage/browser), containing the scripts that connect robots to the cloud, and
* the [Identity & Access Management policies](https://console.cloud.google.com/iam-admin/iam) that authorize robots and humans to communicate with GCP.
With the project deployed, you're ready to [connect a robot to the cloud](connecting-robot.md).
## Update the project
To apply changes made in the source code, run:
```shell
./deploy.sh update [PROJECT_ID]
```
## Clean up
The following command will delete:
* the [cloud-robotics Kubernetes cluster](https://console.cloud.google.com/kubernetes/list)
This can be useful if the cluster is in a broken state.
Be careful with this invocation, since you'll have to redeploy the project and reconnect any robots afterwards.
```shell
./deploy.sh delete [PROJECT_ID]
```
If you want to completely shut down the project, see [the Resource Manager documentation](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects).
## What's next
* [Connecting a robot to the cloud](connecting-robot.md).
* [Setting up OAuth for web UIs](setting-up-oauth.md).
================================================
FILE: docs/how-to/deploying-grpc-service.md
================================================
# Deploying a gRPC service written in C++
Estimated time: 60 min
In this guide we will deploy a gRPC service written in C++ and deploy it to our Google Kubernetes Engine (GKE) cluster in the cloud in such a way that authentication is required for access. We will show how to access the service from the workstation and how to access it from code running in the robot's Kubernetes cluster.
## Prerequisites
* Completed the [Quickstart Guide](../quickstart.md), after which the GCP project is set up and `gcloud-sdk` and `kubectl` are installed and configured.
* `docker` is installed and configured on the workstation ([instructions](https://docs.docker.com/install/linux/docker-ce/ubuntu/)).
* `git` is installed on the workstation.
* For the last part of the guide: A robot that has been successfully [connected to the cloud](connecting-robot.md).
All files for this tutorial are located in
[docs/how-to/examples/greeter-service/](https://github.com/googlecloudrobotics/core/tree/master/docs/how-to/examples/greeter-service).
```shell
git clone https://github.com/googlecloudrobotics/core
cd core/docs/how-to/examples/greeter-service
```
Set your GCP project ID as an environment variable:
```shell
export PROJECT_ID=[YOUR_GCP_PROJECT_ID]
```
## Running gRPC server and client locally
We will use [gRPC's quickstart example](https://grpc.io/docs/quickstart/cpp.html) with small modifications.
If you like to learn more about gRPC in C++, follow their guide first.
The gRPC `helloworld.Greeter` service is defined in `proto/helloworld.proto`.
It accepts a `HelloRequest` containing a `name` and responds with a `HelloReply` containing a `message`.
The server is implemented in `server/server.cc` and the client is implemented in `client/client.cc`. The client sends the request with `name: "world"` to the server, which responds with `message: "Hello world"`.
In this tutorial, we build the server and client code inside Docker containers, so you don't need to install the gRPC library.
If you prefer, you can install the gRPC following [these instructions](https://github.com/grpc/grpc/blob/master/src/cpp/README.md) and build the server and client locally using the provided `Makefile`.
Make sure the Docker daemon is running and your user has the necessary privileges:
```shell
docker run --rm hello-world
```
If this command fails, make sure Docker is installed according to the [installation instructions](https://docs.docker.com/install/linux/docker-ce/ubuntu/).
The Docker image for the server is configured in `server/Dockerfile`:
[embedmd]:# (examples/greeter-service/server/Dockerfile dockerfile)
```dockerfile
FROM grpc/cxx:1.12.0
WORKDIR /data
COPY server/server.cc ./server/
COPY proto/helloworld.proto ./proto/
COPY Makefile ./
RUN make greeter-server && make clean
CMD ["./greeter-server"]
```
We use the [grpc/cxx](https://hub.docker.com/r/grpc/cxx) Docker image which contains all the build tools and libraries (`g++`, `make`, `protoc`, and `grpc`) we need to build the `greeter-server` binary.
The Docker image for the client is configured in `client/Dockerfile` which builds the `greeter-client` from `client/client.cc`.
To build the Docker images, run:
```shell
docker build -t greeter-server -f server/Dockerfile .
docker build -t greeter-client -f client/Dockerfile .
```
> **Note**
> The docker files are in the subfolders `greeter-server/server/` and `greeter-server/client`, but the docker command must be called from `greeter-server/` to include the files which are shared between the server and the client.
You should now have an image tagged `greeter-server` and one tagged `greeter-client` in your local registry:
```shell
docker images | grep greeter
```
To run the server locally, the container's port 50051, which is specified as the gRPC port in `server.cc`, has to be published to your machine with the flag `-p 50051:50051`:
```shell
docker run --rm -p 50051:50051 --name greeter-server greeter-server
```
In another console run the client container.
The flag `--network=host` tells the container to use your workstation's network stack which allows the client to connect to `localhost`.
```shell
docker run --rm --network=host greeter-client ./greeter-client localhost
```
You should see `Greeter received: Hello world` in the client's output and `Received request: name: "world"` in the server's output. You can also send your own name in the gRPC request to the server, try:
```shell
docker run --rm --network=host greeter-client \
./greeter-client localhost $USER
```
You can stop the server from another terminal by running:
```shell
docker stop greeter-server
```
## Uploading the Docker image to the cloud
In order to be able to run the server as a container in our cloud cluster, we need to upload the Docker image to our GCP project's private [container registry](https://cloud.google.com/container-registry/docs/pushing-and-pulling).
Enable the Docker credential helper:
```shell
gcloud auth configure-docker
```
Tag the image and push it to the registry:
```shell
docker tag greeter-server gcr.io/$PROJECT_ID/greeter-server
docker push gcr.io/$PROJECT_ID/greeter-server
```
The image should now show up in the [container registry](https://console.cloud.google.com/gcr).
## Deploying the service in the cloud using Kubernetes
Run the following command to create `greeter-server.yaml` using the provided template:
```shell
cat greeter-server.yaml.tmpl | envsubst >greeter-server.yaml
```
This file contains the information needed by Kubernetes to run the gRPC service in our cloud cluster.
The three resources, Ingress, Service, and Deployment, are explained in the [deploying a service tutorial](deploying-service.md).
In contrast to the other tutorial, the Ingress tells nginx to forward incoming requests to a gRPC backend.
[embedmd]:# (examples/greeter-service/greeter-server.yaml.tmpl yaml /^/ /---/)
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: greeter-server-ingress
annotations:
nginx.ingress.kubernetes.io/backend-protocol: GRPC
nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
ingressClassName: nginx
rules:
- host: "www.endpoints.${PROJECT_ID}.cloud.goog"
http:
paths: # must match the namespace and service name in the proto
- path: /helloworld.Greeter/
pathType: Prefix
backend:
service:
name: greeter-server-service
# must match the port used in server.cc
port:
number: 50051
---
```
Make sure that `kubectl` points to the correct GCP project:
```shell
kubectl config get-contexts
```
If the correct cluster is not marked with an asterisk in the output, you can switch contexts with `kubectl config use-context [...]`.
Then deploy by applying the configuration:
```shell
kubectl apply -f greeter-server.yaml
```
You can explore the various resources that were created on your cluster as a result of this command in the [GKE Console](https://console.cloud.google.com/kubernetes/workload) or with `kubectl`, e.g.:
```shell
kubectl get pods
```
The resulting list should contain a running pod with a name like `greeter-server-xxxxxxxxxx-xxxxx`.
## Redeploying after a change
For convenience, `deploy.sh` provides some commands to create, delete, and update the service.
If you make changes to `greeter-server.yaml.tmpl`, all you have to do is run:
```shell
./deploy.sh update_config
```
If you make changes to `server.cc`, you need to run:
```shell
./deploy.sh update_server
```
This builds, tags, and pushes the Docker image, and then forces a redeployment of the image by calling `kubectl delete pod -l 'app=greeter-server-app'`.
It also updates the resource definitions, so you don't have to run `./deploy.sh update_config` if you made changes to both files.
## Accessing the API
In `client/client.cc` we use `grpc::InsecureChannelCredentials()` when talking to `localhost` while we use `grpc::GoogleDefaultCredentials()` when talking to any other address.
SSL authentication with credentials from the user or robot are necessary when talking to the `greeter-server` in the Cloud Robotics project.
[embedmd]:# (examples/greeter-service/client/client.cc c++ /^ +if.*localhost/ /^ +}$/)
```c++
if (grpc_endpoint.find("localhost:") == 0 ||
grpc_endpoint.find("127.0.0.1:") == 0) {
channel_creds = grpc::InsecureChannelCredentials();
} else {
channel_creds = grpc::GoogleDefaultCredentials();
}
```
Let's try to access our server.
We have to connect to the nginx ingress which is hosted on `www.endpoints.$PROJECT_ID.cloud.goog:443`.
To ensure we have valid credentials to talk to nginx we have to mount our `~/.config` folder in the container.
```shell
docker run --rm -v ~/.config:/root/.config greeter-client \
./greeter-client www.endpoints.$PROJECT_ID.cloud.goog:443 workstation
```
Recall that when running `./greeter-server` on your workstation you were able to see the server's log output upon receiving a request.
This log output is also recorded when the server is running in the cloud cluster. To inspect it, run:
```shell
kubectl logs -l 'app=greeter-server-app'
```
Or go to the [GKE Console](https://console.cloud.google.com/kubernetes/workload), select the `greeter-server` workload and click on "Container logs".
## Accessing the API from the robot
In order to run `greeter-client` on the robot's Kubernetes cluster, we again package it as a Docker image and push it to our container registry, to which the robot also has access.
Our deploy script offers a command to build, tag, and push the image to the cloud registry, like we did with the server container:
```shell
./deploy.sh push_client
```
And finally, to execute the script, SSH into the robot and run:
```shell
export PROJECT_ID=[YOUR_GCP_PROJECT_ID]
docker pull grpc/cxx:1.12.0 # This may take several minutes, depending on WiFi connection
kubectl run -ti --rm --restart=Never --image=gcr.io/$PROJECT_ID/greeter-client greeter-client \
-- ./greeter-client www.endpoints.$PROJECT_ID.cloud.goog:443 robot
```
You should see the server's answer `Hello robot`.
Two things are noteworthy:
* The `greeter-client` Docker image was pulled from the container registry without the need for additional credentials. This worked because there is a periodical job running on the robot's Kubernetes cluster that refreshes the GCR credentials. Run `kubectl get pods` on the robot and you will see pod names that start with `gcr-credential-refresher`.
* `grpc::GoogleDefaultCredentials()` in the client's code automatically obtained credentials that allowed the robot to access the cloud cluster. This worked because the local Metadata Server obtains access tokens for the robot in the background.
## Cleaning up
In order to stop the service in the cloud cluster and revert the configuration changes, run:
```shell
./deploy.sh delete
```
================================================
FILE: docs/how-to/deploying-service.md
================================================
# Deploying a service to the cloud cluster
Estimated time: 60 min
In this guide we will write a HTTP service in Python and deploy it to our Google Kubernetes Engine (GKE) cluster in the cloud in such a way that authentication is required for access. We will show how to access the service from the workstation and how to access it from code running in the robot's Kubernetes cluster.
## Prerequisites
* Completed the [Quickstart Guide](../quickstart.md), after which the GCP project is set up and `gcloud-sdk` and `kubectl` are installed and configured.
* `docker` is installed and configured on the workstation ([instructions](https://docs.docker.com/install/linux/docker-ce/ubuntu/)).
* `python3`, `python3-pip`, and `curl` are installed on the workstation.
* For the last part of the guide: A robot that has been successfully [connected to the cloud](connecting-robot.md).
Create a directory for the code examples of this guide, e.g.:
```shell
mkdir hello-service
cd hello-service
```
Set your GCP project ID as an environment variable:
```shell
export PROJECT_ID=[YOUR_GCP_PROJECT_ID]
```
All files created in this tutorial can be found in
[docs/how-to/examples/hello-service/](https://github.com/googlecloudrobotics/core/tree/master/docs/how-to/examples/hello-service).
If you download the files, you have to replace the placeholders `[PROJECT_ID]` with your GCP project ID:
```shell
sed -i "s/\[PROJECT_ID\]/$PROJECT_ID/g" client/client.py server/hello-server.yaml
```
## A simple HTTP server
Create a subdirectory for the server code:
```shell
mkdir server
cd server
```
Paste the following into a file called `server.py`:
[embedmd]:# (examples/hello-service/server/server.py python)
```python
from http import server
import signal
import sys
class MyRequestHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
print('Received a request')
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(b'Server says hello!\n')
def main():
# Terminate process when Kubernetes sends SIGTERM.
signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
server_address = ('', 8000)
httpd = server.HTTPServer(server_address, MyRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
main()
```
This Python program implements a server that listens on port 8000 for incoming HTTP GET requests. When such a request is received, it prints a line to stdout and responds to the request with a short message.
You can try it out with:
```shell
python server.py
```
If you see `ImportError: No module named http`, you are most likely using Python 2.x; try `python3` instead of `python`.
Then, from another terminal on the same workstation, run:
```shell
curl -i http://localhost:8000
```
You should see the headers indicating that the request was successful (`200 OK`) and the server's response message. You can also try entering `localhost:8000` in your browser's address bar.
## Dockerizing the service
Next, to prepare our Python program for deployment in the cloud, we package it as a Docker image.
Make sure that the docker daemon is running and that your user has the necessary privileges:
```shell
docker run --rm hello-world
```
If this command fails, make sure Docker is installed according to the [installation instructions](https://docs.docker.com/install/linux/docker-ce/ubuntu/).
In the same directory as `server.py`, create a `Dockerfile` with the following contents:
[embedmd]:# (examples/hello-service/server/Dockerfile dockerfile)
```dockerfile
FROM python:alpine
WORKDIR /data
COPY server.py ./
CMD [ "python", "-u", "./server.py" ]
```
(Note: the `-u` option disables line-buffering; Python's line-buffering can prevent output from appearing immediately in the Docker logs.)
To build the Docker image, run:
```shell
docker build -t hello-server .
```
You should now have an image tagged `hello-server` in your local registry:
```shell
docker images | grep hello-server
```
It can be run locally with:
```shell
docker run -ti --rm -p 8000:8000 hello-server
```
You should now be able to send requests to the server with `curl` as before.
## Uploading the Docker image to the cloud
In order to be able to run the server as a container in our cloud cluster, we need to upload the Docker image to our GCP project's private [container registry](https://cloud.google.com/container-registry/docs/pushing-and-pulling).
Enable the Docker credential helper:
```shell
gcloud auth configure-docker
```
Tag the image and push it to the registry:
```shell
docker tag hello-server gcr.io/$PROJECT_ID/hello-server
docker push gcr.io/$PROJECT_ID/hello-server
```
The image should now show up in the [Container Registry](https://console.cloud.google.com/gcr).
## Deploying the service in the cloud using Kubernetes
Create a file called `hello-server.yaml` with the following contents:
[embedmd]:# (examples/hello-service/server/hello-server.yaml yaml)
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: hello-server-ingress
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
ingressClassName: nginx
rules:
- host: www.endpoints.[PROJECT_ID].cloud.goog
http:
paths:
- path: /apis/hello-server
pathType: Prefix
backend:
service:
name: hello-server-service
port:
number: 8000
---
apiVersion: v1
kind: Service
metadata:
name: hello-server-service
spec:
ports:
- name: hello-server-port
port: 8000
# the selector is used to link pods to services
selector:
app: hello-server-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-server
spec:
# all pods matching this selector belong to this deployment
selector:
matchLabels:
app: hello-server-app
template:
metadata:
# the other side of the link between services and pods
labels:
app: hello-server-app
spec:
containers:
- name: hello-server
image: gcr.io/[PROJECT_ID]/hello-server:latest
ports:
# must match the port of the service
- containerPort: 8000
```
This file contains the information needed by Kubernetes to run our HTTP service in our cloud cluster. In the following, we will go over it bit by bit assuming basic familiarity with the [YAML format](https://en.wikipedia.org/wiki/YAML).
We define three Kubernetes resources:
* The *Ingress* contains rules that tell our cluster's nginx (the HTTP server that handles all incoming traffic) which incoming requests to forward to our service.
* The *Service* defines how our service is exposed within the cluster.
* The *Deployment* describes the Docker container to run.
Metadata, labels, and selectors are used to tie the three resources together.
A detailed explanation of the Kubernetes resources is out of scope for this guide, check out the [Kubernetes docs](https://kubernetes.io/docs/home/) to get started. There are a few points worth mentioning, though:
* In the Ingress and Deployment, don't forget to replace `[PROJECT_ID]` with your GCP project ID.
* In the Ingress, there is an annotation with key `nginx.ingress.kubernetes.io/auth-url`. This tells our cluster's nginx to check the authorization of each request before forwarding it to the `hello-server`. The value `http://token-vendor...` is the cluster-internal DNS address of a token verifier service that is running in the cluster as part of the Cloud Robotics Core platform.
* The Ingress rules specify that our hello-server will be reachable at `https://www.endpoints.[PROJECT_ID].cloud.goog/apis/hello-server`.
* The Deployment contains the full reference of the Docker image that we pushed to the Container Registry in the previous section.
Make sure that `kubectl` points to the correct GCP project:
```shell
kubectl config get-contexts
```
If the correct cluster is not marked with an asterisk in the output, you can switch to it with `kubectl config use-context [...]`.
Then deploy by applying the configuration:
```shell
kubectl apply -f hello-server.yaml
```
You can explore the various resources that were created on your cluster as a result of this command in the [GKE Console](https://console.cloud.google.com/kubernetes/workload) or with `kubectl`, e.g.:
```shell
kubectl get pods
```
The resulting list should contain a running pod with a name like `hello-server-xxxxxxxxxx-xxxxx`.
## Redeploying after a change
If you make a change to `server.py`, you need to rebuild and push the Docker image:
```shell
docker build -t hello-server .
docker tag hello-server gcr.io/$PROJECT_ID/hello-server
docker push gcr.io/$PROJECT_ID/hello-server
```
The easiest way to get Kubernetes to restart the workload with the latest version of the container is to delete the pod:
```shell
kubectl delete pod -l 'app=hello-server-app'
```
Kubernetes will automatically pull the newest image and recreate the pod.
If you make a change to `hello-server.yaml`, all you have to do is apply it again:
```shell
kubectl apply -f hello-server.yaml
```
## Accessing the API
Let's try to access our server as we did before:
```shell
curl -i https://www.endpoints.$PROJECT_ID.cloud.goog/apis/hello-server
```
This should result in a `401 Unauthorized` error because we did not supply any authorization information with the request.
(Note: If you comment out the `auth-url` annotation in the Ingress definition and reapply it, this request will succeed.)
We can, however, easily obtain credentials from `gcloud` and attach them to our request by means of an "Authorization" header:
```shell
token=$(gcloud auth application-default print-access-token)
curl -i -H "Authorization: Bearer $token" https://www.endpoints.$PROJECT_ID.cloud.goog/apis/hello-server
```
If this command fails because "Application Default Credentials are not available", you need to first run:
```shell
gcloud auth application-default login --project=$PROJECT_ID
```
And follow the instructions in your browser.
Recall that when running `server.py` on your workstation you were able to see the server's log output upon receiving a request. This log output is also recorded when the server is running in the cloud cluster. To inspect it, run:
```shell
kubectl logs -l 'app=hello-server-app'
```
Or go to the [GKE Console](https://console.cloud.google.com/kubernetes/workload), select the `hello-server` workload and click on "Container logs".
Next, let's access the API from some Python code. Eventually, we will build another Docker image from this code, so it needs to live in a separate directory:
```shell
cd ..
mkdir client
cd client
```
Get some dependencies:
```shell
pip3 install --upgrade google-auth requests
```
(Depending on your local installation, you might have to use `pip` instead of `pip3`.)
Create `client.py` with the following contents:
[embedmd]:# (examples/hello-service/client/client.py python)
```python
import google.auth
import google.auth.transport.requests as requests
credentials, project_id = google.auth.default()
authed_session = requests.AuthorizedSession(credentials)
response = authed_session.request(
"GET", "https://www.endpoints.[PROJECT_ID].cloud.goog/apis/hello-server")
print(response.status_code, response.reason, response.text)
```
Replace `[PROJECT_ID]` with your GCP project ID.
This script:
* uses [`google-auth`](https://google-auth.readthedocs.io/en/latest/user-guide.html) to obtain application default credentials (just as we previously did with the `gcloud` CLI),
* uses the [`requests`](http://docs.python-requests.org/en/stable/) library to perform an authenticated request to our API,
* prints the response to stdout.
Try it out:
```shell
python3 client.py
```
You will get a warning about using end user credentials. (You can safely ignore this warning; we will eventually be using a robot's credentials.)
## Accessing the API from the robot
In order to run this script on the robot's Kubernetes cluster, we again package it as a Docker image and push it to our container registry, to which the robot also has access.
Create a `Dockerfile` containing:
[embedmd]:# (examples/hello-service/client/Dockerfile dockerfile)
```dockerfile
FROM python:alpine
RUN pip install --no-cache-dir google-auth requests
WORKDIR /data
COPY client.py ./
CMD [ "python", "-u", "./client.py" ]
```
Build, tag, and push the image:
```shell
docker build -t hello-client .
docker tag hello-client gcr.io/$PROJECT_ID/hello-client
docker push gcr.io/$PROJECT_ID/hello-client
```
And finally, to execute the script, SSH into the robot and run:
```shell
kubectl run -ti --rm --restart=Never --image=gcr.io/$PROJECT_ID/hello-client hello-client
```
You should see the server's message.
Two things are noteworthy:
* The `hello-client` Docker image was pulled from the Container Registry without the need for additional credentials. This worked because there is a periodical job running on the robot's Kubernetes cluster that refreshes the GCR credentials. Run `kubectl get pods` on the robot and you will see pod names that start with `gcr-credential-refresher`.
* The `google.auth.default()` invocation in the Python code automatically obtained credentials that allowed the robot to access the cloud cluster. This worked because the `google-auth` library queried the local Metadata Server, which obtains access tokens for the robot in the background.
## Cleaning up
In order to stop the service in the cloud cluster and revert the configuration changes, change to the `server` directory and run:
```shell
kubectl delete -f hello-server.yaml
```
================================================
FILE: docs/how-to/examples/charge-service/Dockerfile
================================================
# Minimal image for the charge-controller sync webhook (server.py).
FROM python:alpine
WORKDIR /data
COPY server.py ./
# -u: unbuffered stdout so log lines show up immediately in `kubectl logs`.
CMD [ "python", "-u", "./server.py" ]
================================================
FILE: docs/how-to/examples/charge-service/charge-action.yaml
================================================
# Example ChargeAction custom resource; creating it triggers the
# charge-controller's sync webhook.
apiVersion: example.com/v1
kind: ChargeAction
metadata:
  name: my-charge-action
================================================
FILE: docs/how-to/examples/charge-service/charge-controller.yaml
================================================
# CompositeController tells metacontroller to call our webhook whenever a
# ChargeAction changes, and at least every resyncPeriodSeconds.
apiVersion: metacontroller.k8s.io/v1alpha1
kind: CompositeController
metadata:
  name: charge-controller
spec:
  generateSelector: true
  parentResource:
    apiVersion: example.com/v1
    resource: chargeactions
  # Resync every second so charge progress is refreshed frequently.
  resyncPeriodSeconds: 1
  hooks:
    sync:
      webhook:
        # Must match the Service below and the port in server.py.
        url: http://charge-controller.default:8000/sync
---
# Service exposing the webhook server to metacontroller.
apiVersion: v1
kind: Service
metadata:
  name: charge-controller
spec:
  selector:
    app: charge-controller
  ports:
  - port: 8000
---
# Deployment running the webhook server container.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: charge-controller
spec:
  replicas: 1
  selector:
    matchLabels:
      app: charge-controller
  template:
    metadata:
      labels:
        app: charge-controller
    spec:
      containers:
      - name: controller
        image: gcr.io/[PROJECT_ID]/charge-controller
        ports:
        - containerPort: 8000
================================================
FILE: docs/how-to/examples/charge-service/charge-crd.yaml
================================================
# CRD for ChargeAction. The cr-syncer annotation marks the cloud cluster as
# the source of truth for the spec; status flows back from the robot.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: chargeactions.example.com
  annotations:
    cr-syncer.cloudrobotics.com/spec-source: cloud
spec:
  group: example.com
  names:
    kind: ChargeAction
    plural: chargeactions
    singular: chargeaction
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true
    subresources:
      # Enable the status subresource so controllers can update status
      # independently of the spec.
      status: {}
    schema:
      openAPIV3Schema:
        type: object
        # This example does not define a schema; accept arbitrary fields.
        x-kubernetes-preserve-unknown-fields: true
---
# Grants the cr-syncer access to chargeactions via the aggregation label.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # Fixed typo: was "chartaction"; the name now matches the resource the
  # role grants access to.
  name: cloud-robotics:cr-syncer:chargeaction
  labels:
    cr-syncer.cloudrobotics.com/aggregate-to-robot-service: "true"
rules:
- apiGroups:
  - example.com
  resources:
  - chargeactions
  verbs:
  - get
  - list
  - watch
  - update
================================================
FILE: docs/how-to/examples/charge-service/server.py
================================================
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import signal
import sys
import time
import uuid
class Controller(BaseHTTPRequestHandler):
    """HTTP handler implementing the metacontroller sync webhook."""

    def sync(self, parent, children):
        """Actuate a ChargeAction custom resource.

        Charging is delegated to the external Charge Service; while it is in
        progress, the resource's status is updated with the charge level.
        Because an external service does the actual work, this controller
        creates no child resources and `children` is unused.

        Args:
            parent: The current ChargeAction resource (a dict).
            children: Unused.

        Returns:
            A dict with the desired "status" of the action and an empty
            "children" list, as required by the metacontroller webhook API.
        """
        observed = parent.get("status", None) or {}
        desired = dict(observed)
        state = observed.get("state", "CREATED")
        if state == "CREATED":
            # Newly created: start charging via the external service and keep
            # the request ID in the status so later syncs can poll progress.
            desired["state"] = "IN_PROGRESS"
            desired["request_id"] = self.charge_service.start_charging()
        elif state == "IN_PROGRESS":
            try:
                # Poll the external service for the progress of our request.
                progress = self.charge_service.get_progress(
                    observed["request_id"])
                desired["charge_level_percent"] = progress
                if progress == 100:
                    # Charging has completed.
                    desired["state"] = "OK"
            except ValueError as e:
                # Unknown request ID, e.g. the service forgot the request
                # because the robot restarted mid-charge.
                desired["state"] = "ERROR"
                desired["message"] = str(e)
        elif state not in ("OK", "CANCELLED", "ERROR"):
            # Anything other than a known terminal state is an error;
            # terminal states fall through unchanged.
            desired["state"] = "ERROR"
            desired["message"] = "Unrecognized state: %r" % state
        return {"status": desired, "children": []}

    def do_POST(self):
        """Serve the sync() function as a JSON webhook."""
        length = int(self.headers["content-length"])
        observed = json.loads(self.rfile.read(length))
        desired = self.sync(observed["parent"], observed["children"])
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        self.wfile.write(json.dumps(desired).encode('utf-8'))
class ChargeService(object):
    """Stand-in for an external API that sends the robot to a charger.

    For this example, it just fakes the charging process: progress is a
    linear function of wall-clock time since the charge was started.
    """

    # Time after which a charge request reports 100% progress.
    SECONDS_FOR_FULL_CHARGE = 10

    def __init__(self):
        # Maps request ID -> wall-clock time the charge started.
        self._requests = {}

    def start_charging(self):
        """Begins a fake charge and returns its request ID."""
        new_id = str(uuid.uuid4())
        self._requests[new_id] = time.time()
        return new_id

    def get_progress(self, request_id):
        """Returns charge progress in percent (0-100) for `request_id`.

        Raises:
            ValueError: if the request ID is unknown.
        """
        started = self._requests.get(request_id)
        if started is None:
            raise ValueError("invalid request ID")
        charge_time = time.time() - started
        if charge_time > self.SECONDS_FOR_FULL_CHARGE:
            return 100
        return int(100 * (charge_time / self.SECONDS_FOR_FULL_CHARGE))
# Terminate process when Kubernetes sends SIGTERM.
signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
# Share a single ChargeService instance across all request handler instances.
Controller.charge_service = ChargeService()
# Serve the metacontroller webhook on port 8000 until terminated.
HTTPServer(("", 8000), Controller).serve_forever()
================================================
FILE: docs/how-to/examples/greeter-service/Makefile
================================================
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Detect the host OS (e.g. Linux, Darwin) to pick platform-specific flags.
HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
SYSTEM ?= $(HOST_SYSTEM)
CXX = g++
# Compile/link flags are taken from pkg-config for protobuf and gRPC.
CPPFLAGS += `pkg-config --cflags protobuf grpc`
CXXFLAGS += -std=c++11 -I .
ifeq ($(SYSTEM),Darwin)
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
           -lgrpc++_reflection\
           -ldl
else
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
           -Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed\
           -ldl
endif
PROTOC = protoc
GRPC_CPP_PLUGIN = grpc_cpp_plugin
GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)`

# Directory containing helloworld.proto.
PROTOS_PATH = ./proto/
vpath %.proto $(PROTOS_PATH)

all: greeter-server greeter-client

# Link each binary from the generated protobuf/gRPC objects plus its main.
greeter-server: helloworld.pb.o helloworld.grpc.pb.o server/server.o
	$(CXX) $^ $(LDFLAGS) -o $@
greeter-client: helloworld.pb.o helloworld.grpc.pb.o client/client.o
	$(CXX) $^ $(LDFLAGS) -o $@

# Generate gRPC service stubs from .proto files.
.PRECIOUS: %.grpc.pb.cc
%.grpc.pb.cc: %.proto
	$(PROTOC) -I $(PROTOS_PATH) --grpc_out=. --plugin=protoc-gen-grpc=$(GRPC_CPP_PLUGIN_PATH) $<

# Generate protobuf message code from .proto files.
.PRECIOUS: %.pb.cc
%.pb.cc: %.proto
	$(PROTOC) -I $(PROTOS_PATH) --cpp_out=. $<

clean:
	rm -f *.o client/*.o server/*.o *.pb *.pb.cc *.pb.h
================================================
FILE: docs/how-to/examples/greeter-service/client/Dockerfile
================================================
# Base image with the gRPC C++ toolchain (protoc, grpc_cpp_plugin, g++).
FROM grpc/cxx:1.12.0
WORKDIR /data
COPY client/client.cc ./client/
COPY proto/helloworld.proto ./proto/
COPY Makefile ./
# Build the client binary, then remove intermediate objects to slim the image.
RUN make greeter-client && make clean
CMD ["./greeter-client"]
================================================
FILE: docs/how-to/examples/greeter-service/client/client.cc
================================================
/*
*
* Copyright 2019 The Cloud Robotics Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// NOTE(review): the original #include targets were stripped during text
// extraction (angle-bracket contents lost); restored to the conventional
// headers for this gRPC helloworld client — confirm against upstream.
#include <iostream>
#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>

#include "helloworld.grpc.pb.h"

using grpc::Channel;
using grpc::ChannelCredentials;
using grpc::ClientContext;
using grpc::Status;
using helloworld::Greeter;
using helloworld::HelloReply;
using helloworld::HelloRequest;
class GreeterClient {
public:
GreeterClient(std::shared_ptr channel)
: stub_(Greeter::NewStub(channel)) {}
// Assembles the client's payload, sends it and presents the response back
// from the server.
std::string SayHello(const std::string& user) {
// Data we are sending to the server.
HelloRequest request;
request.set_name(user);
// Container for the data we expect from the server.
HelloReply reply;
// Context for the client. It could be used to convey extra information to
// the server and/or tweak certain RPC behaviors.
ClientContext context;
// The actual RPC.
Status status = stub_->SayHello(&context, request, &reply);
// Act upon its status.
if (status.ok()) {
return reply.message();
} else {
std::cout << status.error_code() << ": " << status.error_message()
<< std::endl;
return "RPC failed";
}
}
private:
std::unique_ptr stub_;
};
int main(int argc, char** argv) {
if (argc < 2) {
const std::string client_path(argv[0]);
std::cout << "Usage:" << std::endl;
std::cout << " " << client_path << " []"
<< std::endl;
std::cout << "Example:" << std::endl;
std::cout << " " << client_path
<< " www.endpoints.${PROJECT_ID}.cloud.goog:443" << std::endl;
return 0;
}
// The first parameter is the server's address, optionally containing the
// port.
std::string grpc_endpoint(argv[1]);
if (grpc_endpoint.find(":") == std::string::npos) {
// Set the default port of the server.
grpc_endpoint += ":50051";
}
// The optional second parameter is the name to be sent to the server.
std::string name("world");
if (argc >= 3) {
name = argv[2];
}
std::cout << "Sending request to " << grpc_endpoint << " ..." << std::endl;
// We are communicating via SSL to the endpoint service using the credentials
// of the user or robot running the client.
// We don't use credentials when connecting to localhost for testing.
std::shared_ptr channel_creds;
if (grpc_endpoint.find("localhost:") == 0 ||
grpc_endpoint.find("127.0.0.1:") == 0) {
channel_creds = grpc::InsecureChannelCredentials();
} else {
channel_creds = grpc::GoogleDefaultCredentials();
}
GreeterClient greeter(grpc::CreateChannel(grpc_endpoint, channel_creds));
std::string user(name);
std::string reply = greeter.SayHello(user);
std::cout << "Greeter received: " << reply << std::endl;
return 0;
}
================================================
FILE: docs/how-to/examples/greeter-service/deploy.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o pipefail -o errexit

# Directory containing this script; used as the working directory so that
# relative paths (Dockerfiles, templates) resolve regardless of caller's cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Quoted to be safe if the checkout path contains spaces.
cd "${DIR}"

# Print an error message to stderr and abort the script.
function die {
  echo "$1" >&2
  exit 1
}
# Build the Docker image for the given target ("client" or "server") and
# push it to this project's Container Registry.
function push_image {
  local target=$1
  docker build -f "${target}/Dockerfile" -t "greeter-${target}" .
  docker tag "greeter-${target}" "gcr.io/${PROJECT_ID}/greeter-${target}"
  docker push "gcr.io/${PROJECT_ID}/greeter-${target}"
}
# Render greeter-server.yaml from its template, substituting environment
# variables such as ${PROJECT_ID}. (Redirect instead of a useless `cat |`.)
function create_config {
  envsubst <greeter-server.yaml.tmpl >greeter-server.yaml
}
# public functions

# Build and push only the client image.
function push_client {
  push_image client
}

# Re-render the manifest and apply it to the current kubectl context.
function update_config {
  create_config
  kubectl apply -f greeter-server.yaml
}

# Push a new server image, delete the running pod so the Deployment pulls
# the new image, then re-apply the config.
function update_server {
  push_image server
  kubectl delete pod -l 'app=greeter-server-app'
  update_config
}

# Build and push both images, then deploy everything.
function create {
  push_image server
  push_client
  update_config
}

# Remove the deployed Kubernetes resources.
function delete {
  create_config
  kubectl delete -f greeter-server.yaml
}
# main

# PROJECT_ID selects the GCP project to push images to and deploy into.
if [[ -z ${PROJECT_ID} ]]; then
  die "Set PROJECT_ID first: export PROJECT_ID=[GCP project id]"
fi
# Only the public entry points above may be invoked from the command line.
if [[ ! "$1" =~ ^(create|delete|update_config|update_server|push_client)$ ]]; then
  die "Usage: $0 {create|delete|update_config|update_server|push_client}"
fi
# call arguments verbatim:
"$@"
================================================
FILE: docs/how-to/examples/greeter-service/greeter-server.yaml.tmpl
================================================
# Ingress routing gRPC traffic for the Greeter service; the auth-url
# annotation makes nginx verify each request with the token vendor.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: greeter-server-ingress
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: GRPC
    nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
  ingressClassName: nginx
  rules:
  - host: "www.endpoints.${PROJECT_ID}.cloud.goog"
    http:
      paths: # must match the namespace and service name in the proto
      - path: /helloworld.Greeter/
        pathType: Prefix
        backend:
          service:
            name: greeter-server-service
            # must match the port used in server.cc
            port:
              number: 50051
---
apiVersion: v1
kind: Service
metadata:
  name: greeter-server-service
spec:
  ports:
  - # optional descriptive name for the service port
    name: grpc-port
    # must match the service port specified in ingress
    port: 50051
  # the selector is used to link pods to services
  selector:
    app: greeter-server-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: greeter-server
spec:
  replicas: 1
  # all pods matching this selector belong to this deployment
  selector:
    matchLabels:
      app: greeter-server-app
  template:
    metadata:
      # the other side of the link between services and pods
      labels:
        app: greeter-server-app
    spec:
      containers:
      - name: greeter-server
        image: "gcr.io/${PROJECT_ID}/greeter-server:latest"
        ports:
        # must match the port of the service
        - containerPort: 50051
================================================
FILE: docs/how-to/examples/greeter-service/proto/helloworld.proto
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";

package helloworld;

// The greeting service definition.
service Greeter {
  // Sends a greeting; the reply echoes the request's name in a message.
  rpc SayHello (HelloRequest) returns (HelloReply) {}
}

// The request message containing the user's name.
message HelloRequest {
  string name = 1;
}

// The response message containing the greetings.
message HelloReply {
  string message = 1;
}
================================================
FILE: docs/how-to/examples/greeter-service/server/Dockerfile
================================================
# Base image with the gRPC C++ toolchain (protoc, grpc_cpp_plugin, g++).
FROM grpc/cxx:1.12.0
WORKDIR /data
COPY server/server.cc ./server/
COPY proto/helloworld.proto ./proto/
COPY Makefile ./
# Build the server binary, then remove intermediate objects to slim the image.
RUN make greeter-server && make clean
CMD ["./greeter-server"]
================================================
FILE: docs/how-to/examples/greeter-service/server/server.cc
================================================
/*
*
* Copyright 2019 The Cloud Robotics Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// NOTE(review): the original #include targets were stripped during text
// extraction (angle-bracket contents lost); restored to the conventional
// headers for this gRPC helloworld server — confirm against upstream.
#include <csignal>
#include <iostream>
#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>

#include "helloworld.grpc.pb.h"

using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::Status;
using helloworld::Greeter;
using helloworld::HelloReply;
using helloworld::HelloRequest;

// The gRPC server is defined globally so that SIGTERM handler can shut it
// down when Kubernetes stops the process.
// NOTE(review): the template argument was lost in extraction; restored as
// std::unique_ptr<Server> to match BuildAndStart()'s return type.
std::unique_ptr<Server> server;
// Logic and data behind the server's behavior.
// Logic and data behind the server's behavior: replies "Hello <name>" and
// logs each incoming request to stdout.
class GreeterServiceImpl final : public Greeter::Service {
  Status SayHello(ServerContext* context, const HelloRequest* request,
                  HelloReply* reply) override {
    // Log the request so it shows up in `kubectl logs`.
    std::cout << "Received request: " << request->ShortDebugString()
              << std::endl;
    reply->set_message("Hello " + request->name());
    return Status::OK;
  }
};
// Builds and starts the gRPC server on port 50051, then blocks until a
// SIGTERM handler shuts it down.
void RunServer() {
  std::string server_address("0.0.0.0:50051");
  GreeterServiceImpl service;
  ServerBuilder builder;
  // Listen on the given address without any authentication mechanism. Cloud
  // Robotics Core ensures that clients are authenticated.
  builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
  // Register "service" as the instance through which we'll communicate with
  // clients. In this case it corresponds to a *synchronous* service.
  builder.RegisterService(&service);
  // Finally assemble the server.
  server = builder.BuildAndStart();
  std::cout << "Server listening on " << server_address << std::endl;
  // Capture-less lambda converts to the plain function pointer std::signal
  // expects; it accesses the server through the global defined above.
  std::signal(SIGTERM, [](int) {
    // When SIGTERM is received, shutdown the gRPC server.
    server->Shutdown();
  });
  // Wait for the server to shutdown.
  server->Wait();
}
// Entry point: runs the blocking gRPC server until SIGTERM.
int main(int argc, char** argv) {
  RunServer();
  return 0;
}
================================================
FILE: docs/how-to/examples/hello-service/client/Dockerfile
================================================
# Minimal image for the hello-service example client.
FROM python:alpine
# google-auth + requests are the only runtime dependencies of client.py.
RUN pip install --no-cache-dir google-auth requests
WORKDIR /data
COPY client.py ./
# -u: unbuffered stdout so output shows up immediately in `kubectl logs`.
CMD [ "python", "-u", "./client.py" ]
================================================
FILE: docs/how-to/examples/hello-service/client/client.py
================================================
# Calls the hello-server API using Google application default credentials.
import google.auth
import google.auth.transport.requests as requests

# Application default credentials: end-user credentials when run locally,
# the robot's credentials (via the metadata server) when run on the robot.
credentials, project_id = google.auth.default()
# AuthorizedSession attaches a bearer token to each outgoing request.
authed_session = requests.AuthorizedSession(credentials)
response = authed_session.request(
    "GET", "https://www.endpoints.[PROJECT_ID].cloud.goog/apis/hello-server")
print(response.status_code, response.reason, response.text)
================================================
FILE: docs/how-to/examples/hello-service/server/Dockerfile
================================================
# Minimal image for the hello-service example server (stdlib only).
FROM python:alpine
WORKDIR /data
COPY server.py ./
# -u: unbuffered stdout so log lines show up immediately in `kubectl logs`.
CMD [ "python", "-u", "./server.py" ]
================================================
FILE: docs/how-to/examples/hello-service/server/hello-server.yaml
================================================
# Ingress exposing the hello-server; the auth-url annotation makes nginx
# verify each request's credentials with the token vendor.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hello-server-ingress
  annotations:
    nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
  ingressClassName: nginx
  rules:
  - host: www.endpoints.[PROJECT_ID].cloud.goog
    http:
      paths:
      - path: /apis/hello-server
        pathType: Prefix
        backend:
          service:
            name: hello-server-service
            port:
              number: 8000
---
apiVersion: v1
kind: Service
metadata:
  name: hello-server-service
spec:
  ports:
  - name: hello-server-port
    port: 8000
  # the selector is used to link pods to services
  selector:
    app: hello-server-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  # all pods matching this selector belong to this deployment
  selector:
    matchLabels:
      app: hello-server-app
  template:
    metadata:
      # the other side of the link between services and pods
      labels:
        app: hello-server-app
    spec:
      containers:
      - name: hello-server
        image: gcr.io/[PROJECT_ID]/hello-server:latest
        ports:
        # must match the port of the service
        - containerPort: 8000
================================================
FILE: docs/how-to/examples/hello-service/server/server.py
================================================
from http import server
import signal
import sys
class MyRequestHandler(server.BaseHTTPRequestHandler):
    """Answers every GET request with a fixed plain-text greeting."""

    def do_GET(self):
        # Log to stdout; visible via `kubectl logs` when run in a cluster.
        print('Received a request')
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        self.wfile.write(b'Server says hello!\n')
def main():
    """Serves MyRequestHandler on port 8000 until terminated."""
    # Terminate process when Kubernetes sends SIGTERM.
    signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
    # Empty host means "listen on all interfaces".
    server_address = ('', 8000)
    httpd = server.HTTPServer(server_address, MyRequestHandler)
    httpd.serve_forever()


if __name__ == '__main__':
    main()
================================================
FILE: docs/how-to/running-ros-node.md
================================================
# Running a ROS node as a Kubernetes deployment
Estimated time: 10 min
The following instructions describe how to setup a Kubernetes cluster on a robot
running Ubuntu 20.04 and run a ROS node on it.
The installation script installs and configures:
* Docker
* A single-node Kubernetes cluster (packages: kubectl, kubeadm, kubelet)
Once you've done this, you can use Kubernetes to:
* Reduce downtime during updates with Kubernetes deployments
* Apply CPU, disk or memory quotas to individual processes
* Add additional compute nodes to the cluster, such as an Nvidia Jetson
* Use a network plugin to apply network access control
* Manage project configuration or sensitive secrets such as account credentials
For more details, refer to the [Kubernetes documentation](https://kubernetes.io/docs/home/).
## Installing the cluster on the robot
See [Connecting a robot to the cloud](connecting-robot.md) for instructions on installing the robot cluster.
## Run a ROS node with Kubernetes
If you're already using ROS on your robot, you can run a ROS node inside Kubernetes that will communicate with other nodes on the robot. If not, you can follow the [ROS tutorials](http://wiki.ros.org/ROS/Tutorials) to get started.
First, make sure you're running `roscore`. In another terminal, please run:
```shell
roscore
```
> **Caution:** If you have a more complicated ROS setup, such as a ROS master running on another machine, you might need to change `ROS_MASTER_URI` or `ROS_IP` in rostopic-echo.yaml.
You can run a ROS node by creating a Kubernetes Deployment object, and you can describe a Deployment in a YAML file.
For example, this YAML file describes a Deployment that runs `rostopic echo`.
Create file called `rostopic-echo.yaml` with the following contents:
```yaml
# Deployment running `rostopic echo chatter` against a ROS master on the host.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rostopic-echo
spec:
  selector:
    matchLabels:
      app: rostopic-echo
  template:
    metadata:
      labels:
        app: rostopic-echo
    spec:
      containers:
      - name: rostopic-echo
        image: ros:melodic-ros-core
        args:
        - rostopic
        - echo
        - chatter
        env:
        # Adjust these if your ROS master runs on another machine.
        - name: ROS_MASTER_URI
          value: http://192.168.9.1:11311
        - name: ROS_IP
          value: 192.168.9.1
      # Share the host network so the container can reach roscore directly.
      hostNetwork: true
```
> **Note:** For simplicity, this example uses `hostNetwork: true` to disable network isolation. Advanced users can disable host networking to improve security. For more information, see the networking documentation for [Docker](https://docs.docker.com/network/), [Kubernetes](https://kubernetes.io/docs/concepts/cluster-administration/networking/) and [ROS](http://wiki.ros.org/ROS/NetworkSetup).
After creating `rostopic-echo.yaml`, use `kubectl` to apply it to your cluster:
```shell
kubectl apply -f rostopic-echo.yaml
```
Depending on your internet connection, it will take a minute or so to download the Docker image. Wait until you see `Running`:
```console
$ watch kubectl get pods -l app=rostopic-echo
NAMESPACE NAME READY STATUS RESTARTS AGE
default rostopic-echo-576cbf47c7-dtlc6 1/1 Running 0 1m
```
Now, publish a ROS message and check that it was received inside Kubernetes:
```console
$ rostopic pub -1 chatter std_msgs/String "Hello, world"
$ kubectl logs -l app=rostopic-echo
data: "Hello, world"
---
```
Kubernetes will keep this node running until you delete the deployment:
```shell
kubectl delete -f rostopic-echo.yaml
```
================================================
FILE: docs/how-to/setting-up-oauth.md
================================================
# Setting up OAuth for web UIs
Estimated time: 5 min
When a user loads a web UI hosted in the cloud Kubernetes cluster, the server has to authenticate them before allowing them to use the service.
To enable this, you'll need to set up OAuth with the Cloud Console.
Once you've completed these steps, you'll be able to access services with web UIs, such as [Grafana](https://grafana.com/).
If you haven't already, complete the [Quickstart Guide](../quickstart.md) or [Deploy Cloud Robotics Core from sources](deploy-from-sources.md) to set up your GCP project.
## Create OAuth credentials
1. Open the [cloud console](https://console.cloud.google.com/) and ensure that
your cloud project is selected in the project selector dropdown at the top.
1. Configure the OAuth consent screen: [APIs & Services → Credentials → OAuth consent screen](https://console.cloud.google.com/apis/credentials/consent).
* User Type: Internal
* Application name: My Cloud Robotics Application
* Support email: *your email address*
* Add `[PROJECT_ID].cloud.goog` to Authorized domains (where `[PROJECT_ID]` is your GCP project ID).
* Leave the other fields blank.
1. Create an OAuth client ID: [APIs & Services → Credentials → Create credentials → OAuth client ID](https://console.cloud.google.com/apis/credentials/oauthclient).
* Application type: Web application
* Restrictions → Authorized JavaScript origins:
`https://www.endpoints.[PROJECT_ID].cloud.goog`
* Restrictions → Authorized redirect URIs:
`https://www.endpoints.[PROJECT_ID].cloud.goog/oauth2/callback`
* Click "Create".
You'll see a dialog containing the client ID and secret which we will add to your `config.sh` next.
## Update your config and redeploy
1. Update your `config.sh` in the Google Cloud Storage bucket:
```shell
curl -fS "https://storage.googleapis.com/cloud-robotics-releases/run-install.sh" >run-install.sh
bash ./run-install.sh $PROJECT_ID --set-oauth
```
Enter the OAuth client ID and secret from the previous step when asked.
1. Update your cloud project:
```shell
bash ./run-install.sh $PROJECT_ID
```
After the update has been deployed, OAuth is enabled in your cloud project.
Verify that `oauth2-proxy` is running now:
```console
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
...
oauth2-proxy-xxx 1/1 Running 0 1m
```
## Try it out
Open a web browser and visit `https://www.endpoints.[PROJECT_ID].cloud.goog/grafana/dashboards`, replacing `[PROJECT_ID]` with your GCP project ID.
You'll be prompted to log in with your Google account, after which you'll see a list of dashboards.
Try selecting "Kubernetes Capacity Planning" to see the resource usage of the Kubernetes cluster.
================================================
FILE: docs/how-to/using-cloud-storage.md
================================================
# Using Cloud Storage from a robot
Estimated time: 20 minutes
This page describes a simple Cloud Storage transaction that demonstrates how Google Cloud APIs can be accessed without additional authentication configuration from within the robot's Kubernetes cluster.
Normally, to access a private Cloud Storage bucket from a robot, you'd need to manage a service account for the robot through Identity & Access Management (IAM). Cloud Robotics handles the robot's identity for you, so you can connect securely without additional configuration.
1. If you haven't already, complete the [Connecting a robot to the cloud](connecting-robot.md) steps.
1. Choose a name for the Cloud Storage bucket.
In the course of this guide, the robot will upload a file into a private bucket. The bucket namespace is global, so we must take care to choose a bucket name that is not in use yet by any other user of GCP. See also the [bucket naming requirements](https://cloud.google.com/storage/docs/naming), and [best practices](https://cloud.google.com/storage/docs/best-practices#naming).
For this guide we will assume a bucket name like `robot-hello-world-dc1bb474`, where the part after the last dash is a random hexadecimal number. You can generate your own unique bucket name with the command
```shell
echo robot-hello-world-$(tr -dc 'a-f0-9' < /dev/urandom | head -c8)
```
Note: If the bucket name is already in use, creating the bucket in the next step will fail. In this case, choose a different bucket name.
1. Create the Cloud Storage bucket.
On your workstation, run:
```shell
gcloud storage buckets create gs://[BUCKET_NAME]
```
Replace `[BUCKET_NAME]` with the name of the bucket you created, e.g., `robot-hello-world-dc1bb474`.
`gcloud storage` contains the sub-commands for accessing Cloud Storage; it is part of the `gcloud-sdk` package.
Note that the bucket is not publicly writable, as can be verified in the [Cloud Storage browser](https://console.cloud.google.com/storage/browser).
1. Drop a file into the bucket from the robot.
On the robot, run:
```console
docker pull python:alpine
kubectl run python --restart=Never --rm -ti --image=python:alpine -- /bin/sh
# apk add gcc musl-dev libffi-dev
# pip3 install google-cloud-storage
# python3
>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.bucket("[BUCKET_NAME]")
>>> bucket.blob("hello_world.txt").upload_from_string("Hello, I am a robot!\n")
```
Replace `[BUCKET_NAME]` with the name of the bucket you created.
1. Verify that the file was uploaded.
On your workstation, run:
```shell
gcloud storage cat gs://[BUCKET_NAME]/hello_world.txt
```
This should result in the output `Hello, I am a robot!`.
So why was the robot able to drop a file in the non-public bucket? There is a lot going on in the background that enabled the configuration-less secure API access:
* When the robot was connected to the cloud, it generated a new private key and registered the corresponding public key in a device registry, e.g., as Kubernetes configmaps.
* The setup-robot command also started a Metadata Server as a workload in the robot's Kubernetes cluster. You can verify it is running with `kubectl get pods`. The Metadata Server identifies itself to the cloud using the robot's private key and obtains short-lived access tokens in the background.
* Every time a client library performs a call to a Google Cloud API, it asks the local Metadata Server for an access token.
* The permissions of the robot can be inspected and managed in the Cloud Console under "IAM & admin"; you will notice that there is a service account called `robot-service@[PROJECT_ID].iam.gserviceaccount.com`, which has "Storage Admin" permissions. These permissions allowed the robot to write to the private bucket.
What's next:
* You can experiment with accessing other Google Cloud APIs, such as [Logging](https://cloud.google.com/logging/docs/) or [Pub/Sub](https://cloud.google.com/pubsub/docs/), from the robot programmatically. Also, Python is not the only programming language with Google Cloud client libraries: the APIs can be accessed, e.g., from code written in [Go](https://cloud.google.com/storage/docs/reference/libraries#client-libraries-install-go) in a similar configuration-less manner.
* [Write your own service](deploying-service.md) that runs as a container in the cloud and provides an API that can be accessed securely from the robot.
================================================
FILE: docs/index.md
================================================
Google's Cloud Robotics Core is an open source platform that provides
infrastructure essential to building and running robotics solutions for business
automation. Cloud Robotics Core makes managing robot fleets easy for developers,
integrators, and operators. It enables:
* packaging and distribution of applications
* secure, bidirectional robot-cloud communication
* easy access to Google Cloud services such as ML, logging, and monitoring.

### Documentation
* [Quickstart](quickstart.md): Set up Cloud Robotics from binaries.
* [Overview](overview.md): Develop a deeper understanding of Cloud Robotics.
* Concepts
* Common: [Project configuration](concepts/config.md)
* Layer 1: [Federation](concepts/federation.md),
[Device Identity](concepts/device_identity.md)
* Layer 2: [Application Management](concepts/app-management.md)
* How-to guides
* [Deploying Cloud Robotics from sources](how-to/deploy-from-sources.md)
Build and deploy Cloud Robotics from the sources hosted on Github using
Bazel.
* [Running a ROS node as a Kubernetes deployment](how-to/running-ros-node.md)
Use Kubernetes to administer containerized workloads on a robot.
* [Setting up OAuth for web UIs](how-to/setting-up-oauth.md)
Use services like Grafana with a web browser.
* [Connecting a robot to the cloud](how-to/connecting-robot.md)
Enable secure communication between a robot and the Google Cloud Platform.
* [Using Cloud Storage from a robot](how-to/using-cloud-storage.md)
Programmatically store data from the robot with Cloud Storage.
* [Deploying a service to the cloud](how-to/deploying-service.md)
Run an API service in the cloud cluster and access it from a robot.
* [Deploying a gRPC service](how-to/deploying-grpc-service.md)
Run a gRPC service written in C++ in the cloud cluster and access it from a robot.
* [Creating a declarative API](how-to/creating-declarative-api.md)
Create a Kubernetes-style declarative API and run it on the cloud or on a robot.
* Development
* [Debugging authentication problems](developers/debug-auth.md)
Useful tips for working with Authentication and Authorization systems.
================================================
FILE: docs/overview.md
================================================
# Overview of Cloud Robotics Core
To understand Cloud Robotics Core, you should be familiar with the following concepts:
**Docker containers**
: Containers decouple applications from the environment in which they run. They let you deploy
applications easily and consistently, regardless of whether the target environment is a robot,
an on-premise data center, or the public cloud. Docker is a popular, open-source container format.
Read more about [Containers at Google](https://cloud.google.com/containers/).
**Kubernetes**
: Kubernetes is an open-source system to deploy, scale, and manage containerized applications
anywhere. It lets you deploy containerized applications onto your robots, run them on one or more
compute nodes and manage associated resources like configuration settings or networking. Read
more in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/).
**Helm**
: Helm is a tool for managing Kubernetes charts. Charts are packages of pre-configured Kubernetes
resources. Read more in the [Helm documentation](https://github.com/helm/helm/blob/master/README.md).
**Custom Resource (CR)**
: Custom resources are extensions of the Kubernetes API that let you manage application-specific
data, offering [many features](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#common-features).
To use a custom resource, you first have to create a Custom Resource Definition (CRD). Read more
about [extending the Kubernetes API with Custom Resource Definitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/).
## Layers
Cloud Robotics Core builds upon standard Kubernetes management tools and several open-source
packages. We structure Cloud Robotics Core into several layers that address distinct needs.

### Layer 0: Kubernetes on Robot
Layer 0 is an on-robot Kubernetes setup optimized for single-node clusters. It lets you deploy
containerized workloads onto the robot, run them on one or more compute nodes and manage associated
resources like configuration settings or networking without the overhead of a VM.
You can try it out by following the [Running a ROS node as a Kubernetes deployment](how-to/running-ros-node.md)
How-to Guide.
### Layer 1: Robot Fleet Connectivity and Security
Layer 1 provides secure communication and access control. Every robot is identified by a unique
keypair. The public key is managed as a Kubernetes configmap.
A cloud-based authorization service uses these keys to authenticate robots and generate short-lived
OAuth access tokens.
This approach follows the [BeyondCorp](https://cloud.google.com/beyondcorp/) zero trust network
model: all connections are authenticated and authorized individually, without a need for a
traditional VPN. A specific robot's access may be revoked if needed. The same set of credentials
also serves as foundation for Apps to securely communicate with the cloud over gRPC.
You can try it out by following the [Connecting a robot to the cloud](how-to/connecting-robot.md)
How-to Guide.
Layer 1 also provides a light-weight cluster federation system that synchronizes selected custom
resources across the fleet. This provides developers with a pattern for command & control that is
robust against intermittent connectivity and fits well with the overall declarative Kubernetes
model. You can read more in the concept guide about [Federation](concepts/federation.md).
> **Note:** Layer 2 and 3 (below) are currently under development. You'll find an early
> implementation of these layers in our repository but corresponding APIs and concepts are not stable yet.
### Layer 2: App Management
Layer 2 introduces App management, built as a lightweight facade on top of Kubernetes and the Helm
package manager. In Cloud Robotics Core, Apps consist of one or more Docker containers and
associated resources, that run on the robot and in the cloud. The App management layer determines
which Apps, and app components, run on robots and in the cloud. The concept guide has more details
on [Application Management](concepts/app-management.md).
### Layer 3: Managed Repositories
Layer 3 adds the capability to download Apps from remote repositories. Most of our core platform
services will be packaged as optional downloadable extensions via a Google-managed repository. In
addition, vendor-managed repositories can be added and used for locating new or updated vendor-
provided Apps.
Apps provide DevOps and robotics services such as:
* Logs aggregation and stack traces with StackDriver
* Metric collection, upload and dashboarding with Prometheus and Grafana
* Sensor data transport for cloud-based analysis
* Remote debugging using RViz over WebRTC
* Remote administration
================================================
FILE: docs/quickstart.md
================================================
# Quickstart
Estimated time: 10 min
This page describes how to set up a Google Cloud Platform (GCP) project
containing the Cloud Robotics Core (CRC) components.
In particular, this creates a cluster with Google Kubernetes Engine and prepares
it to accept connections from robots, which enables those robots to securely
communicate with GCP.
The commands were tested on machines running Debian (Stretch) or Ubuntu (16.04
and 18.04) Linux.
1. In the GCP Console, go to the [Manage resources][resource-manager] page and
select or create a project.
1. Make sure that [billing][modify-project] is enabled for your project.
1. [Install the Cloud SDK][cloud-sdk]. When prompted, choose the project you created above.
1. After installing the Cloud SDK, install the `kubectl` command-line tool:
```shell
gcloud components install kubectl gke-gcloud-auth-plugin
```
If you're using Cloud Shell, Debian, or Ubuntu, you may need to use apt instead:
```shell
apt-get install kubectl google-cloud-sdk-gke-gcloud-auth-plugin
```
1. Install tools required for installation:
```shell
sudo apt-get install curl tar xz-utils
```
## Deploy the project
1. Create application default credentials, which are used to deploy the cloud project.
```shell
gcloud auth application-default login
```
1. Create a directory for CRC installer.
```shell
mkdir cloud-robotics
cd cloud-robotics
```
1. Set your GCP project ID as an environment variable.
```shell
export PROJECT_ID=[YOUR_GCP_PROJECT_ID]
```
1. Install the latest nightly build into your GCP project by running the install script.
Accept the default configuration by hitting `ENTER` on all questions; you can change the settings later.
```shell
curl -fS "https://storage.googleapis.com/cloud-robotics-releases/run-install.sh" >run-install.sh
bash ./run-install.sh $PROJECT_ID
```
The install script created a Kubernetes cluster using Google Kubernetes Engine
and used [Synk][synk] to install the Cloud Robotics Core component helm charts.
You can browse these components on the [Workloads dashboard][workloads].
Alternatively, you can list them from the console on your workstation:
```console
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
cert-manager-xxx 1/1 Running 0 1m
nginx-ingress-xxx 1/1 Running 0 1m
oauth2-proxy-xxx 0/1 CrashLoopBackOff 4 1m
token-vendor-xxx 1/1 Running 0 1m
```
> **Note** Unless you already set up OAuth, the `oauth2-proxy` will show an error which we will ignore for now.
In addition to the cluster, the install script also created:
* the [[PROJECT_ID]-cloud-robotics-config bucket][storage-bucket], containing a `config.sh` and a Terraform state which are necessary to update your cloud project later,
* the [[PROJECT_ID]-robot Cloud Storage bucket][storage-bucket], containing the scripts that connect robots to the cloud, and
* the [Identity & Access Management policies][iam] that authorize robots and humans to communicate with GCP.
## Update the project
To update your Cloud Robotics configuration, run the install script with the `--set-config` flag.
```shell
bash ./run-install.sh $PROJECT_ID --set-config
```
This command only updates the config but does not update your cloud project.
To update the installation to the latest version and apply config changes, run the installer again.
```shell
bash ./run-install.sh $PROJECT_ID
```
If you deleted the install script or you want to run an update from another machine which has the Cloud SDK installed, simply run:
```
curl -fS "https://storage.googleapis.com/cloud-robotics-releases/run-install.sh"\
| bash -s -- $PROJECT_ID
```
## Clean up
The following command will delete:
* the [cloud-robotics Kubernetes cluster](https://console.cloud.google.com/kubernetes/list)
This can be useful if the cluster is in a broken state.
Be careful with this invocation, since you'll have to redeploy the project and reconnect any robots afterwards.
```shell
curl -fS "https://storage.googleapis.com/cloud-robotics-releases/run-install.sh"\
| bash -s -- $PROJECT_ID --delete
```
> **Known issue** After deleting CRC from your project, the endpoint services will be in a "pending deletion" state for 30 days.
> If you want to reinstall CRC into the same project again, you have to [undelete the services][undelete-service] manually.
If you want to completely shut down the project, see [the Resource Manager documentation][shutting_down_projects].
## Next steps
* [Connect a robot to the cloud](how-to/connecting-robot.md).
* [Set up OAuth](how-to/setting-up-oauth.md)
[resource-manager]: https://console.cloud.google.com/cloud-resource-manager
[modify-project]: https://cloud.google.com/billing/docs/how-to/modify-project
[cloud-sdk]: https://cloud.google.com/sdk/docs/
[workloads]: https://console.cloud.google.com/kubernetes/workload
[storage-bucket]: https://console.cloud.google.com/storage/browser
[iam]: https://console.cloud.google.com/iam-admin/iam
[undelete-service]: https://cloud.google.com/sdk/gcloud/reference/endpoints/services/undelete
[shutting_down_projects]: https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects
[synk]: https://github.com/googlecloudrobotics/core/tree/master/src/go/cmd/synk/README.md
================================================
FILE: new_versions.txt
================================================
{
"cert-manager": "1.13.2",
"ingress-nginx": "1.9.4",
"oauth2-proxy": "7.5.1",
"stackdriver-logging-agent": "1.10.1"
}
================================================
FILE: non_module_deps.bzl
================================================
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# -- load statements -- #
def _non_module_deps_impl(ctx):
    """Module extension implementation declaring external repos not yet on bzlmod.

    Each http_archive below pins a third-party dependency by URL and sha256 so
    builds are reproducible. To upgrade one, update the URL and its checksum
    together.
    """

    # Sysroot and libc
    # How to upgrade:
    # - Find image in https://storage.googleapis.com/chrome-linux-sysroot/ for amd64 for
    #   a stable Linux (here: Debian bullseye), of this pick a current build.
    # - Verify the image contains expected /lib/x86_64-linux-gnu/libc* and defines correct
    #   __GLIBC_MINOR__ in /usr/include/features.h
    # - If system files are not found, add them in ../BUILD.sysroot
    http_archive(
        name = "com_googleapis_storage_chrome_linux_amd64_sysroot",
        build_file = Label("//bazel:BUILD.sysroot"),
        sha256 = "5df5be9357b425cdd70d92d4697d07e7d55d7a923f037c22dc80a78e85842d2c",
        urls = [
            # features.h defines GLIBC 2.31.
            "https://storage.googleapis.com/chrome-linux-sysroot/toolchain/4f611ec025be98214164d4bf9fbe8843f58533f7/debian_bullseye_amd64_sysroot.tar.xz",
        ],
    )

    # Mock-generation rules for Go tests, pinned to a specific commit.
    http_archive(
        name = "bazel_gomock",
        urls = [
            "https://github.com/jmhodges/bazel_gomock/archive/fde78c91cf1783cc1e33ba278922ba67a6ee2a84.tar.gz",
        ],
        sha256 = "692421b0c5e04ae4bc0bfff42fb1ce8671fe68daee2b8d8ea94657bb1fcddc0a",
        strip_prefix = "bazel_gomock-fde78c91cf1783cc1e33ba278922ba67a6ee2a84",
    )

    # Helm v2 binary release (see //third_party/helm2).
    http_archive(
        name = "kubernetes_helm",
        urls = [
            "https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz",
        ],
        sha256 = "f3bec3c7c55f6a9eb9e6586b8c503f370af92fe987fcbf741f37707606d70296",
        strip_prefix = "linux-amd64",
        build_file = "//third_party/helm2:BUILD.bazel",
    )

    # Helm v3 binary release (see //third_party/helm3).
    http_archive(
        name = "kubernetes_helm3",
        urls = [
            "https://get.helm.sh/helm-v3.9.0-linux-amd64.tar.gz",
        ],
        sha256 = "1484ffb0c7a608d8069470f48b88d729e88c41a1b6602f145231e8ea7b43b50a",
        strip_prefix = "linux-amd64",
        build_file = "//third_party/helm3:BUILD.bazel",
    )

    # Terraform CLI binary release.
    http_archive(
        name = "hashicorp_terraform",
        urls = [
            "https://releases.hashicorp.com/terraform/1.11.4/terraform_1.11.4_linux_amd64.zip",
        ],
        sha256 = "1ce994251c00281d6845f0f268637ba50c0005657eb3cf096b92f753b42ef4dc",
        build_file = "//third_party:terraform.BUILD",
    )

    # kubernetes-sigs Application CRD sources, pinned to a specific commit.
    http_archive(
        name = "com_github_kubernetes_sigs_application",
        urls = [
            "https://github.com/kubernetes-sigs/application/archive/c8e2959e57a02b3877b394984a288f9178977d8b.tar.gz",
        ],
        sha256 = "8bafd7fb97563d1a15d9afc68c87e3aabd664f60bd8005f1ae685d79842c1ac4",
        strip_prefix = "application-c8e2959e57a02b3877b394984a288f9178977d8b",
        build_file = "//third_party:app_crd.BUILD",
    )

    # ingress-nginx controller sources at a tagged release.
    http_archive(
        name = "ingress-nginx",
        urls = [
            "https://github.com/kubernetes/ingress-nginx/archive/refs/tags/controller-v1.8.0.tar.gz",
        ],
        sha256 = "6e571764828b24545eea49582fd56d66d51fc66e52a375d98251c80c57fdb2fc",
        strip_prefix = "ingress-nginx-controller-v1.8.0",
        build_file = "//third_party:ingress-nginx.BUILD",
    )

# -- repo definitions -- #

# Entry point registered in MODULE.bazel via use_extension().
non_module_deps = module_extension(implementation = _non_module_deps_impl)
================================================
FILE: nvchecker.toml
================================================
[__config__]
oldver = "current_versions.txt"
newver = "new_versions.txt"
# containers
# git grep -E "^\s+image: " *.yaml | grep -v "{{"
[ingress-nginx]
source = "container"
# As of 2023-06-09, nvchecker is not compatible with registry.k8s.io, which
# doesn't return the WWW-Authenticate header that nvchecker expects, so you get
# UnsupportedAuthenticationError.
registry = "k8s.gcr.io"
container = "ingress-nginx/controller"
prefix = "v"
[oauth2-proxy]
source = "container"
registry = "quay.io"
container = "oauth2-proxy/oauth2-proxy"
prefix = "v"
[stackdriver-logging-agent]
source = "container"
registry = "gcr.io"
container = "stackdriver-agents/stackdriver-logging-agent"
# github packages
[cert-manager]
source = "github"
github = "jetstack/cert-manager"
use_latest_release = true
prefix = "v"
# Does not find the version; try the alternative at the bottom
#[kube-prometheus-stack]
#source = "github"
#github = "prometheus-community/helm-charts"
#path = "charts/kube-prometheus-stack"
#use_max_tag = true
# TODO(ensonic): requires auth token
# use_latest_tag = true
#prefix = "kube-prometheus-stack"
# Cover helm repos:
# https://medium.com/bigdatarepublic/software-versioning-on-kubernetes-806a48480832
================================================
FILE: scripts/BUILD.bazel
================================================
# Expose the deployment helper scripts so that other Bazel packages can
# reference them as files (e.g. to bundle them into release artifacts).
exports_files([
    "common.sh",
    "config.sh",
    "include-config.sh",
    "set-config.sh",
])
================================================
FILE: scripts/backup_robots.sh
================================================
#!/bin/bash
#
# Copyright 2021 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see https://github.com/kubernetes/kubernetes/issues/90066#issuecomment-780236185 for hiding managed-fields
# another tool: https://github.com/itaysk/kubectl-neat
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${DIR}/common.sh"

# Fail early if the required tools are missing.
if ! hash jq 2>/dev/null; then
  die "This script needs jq (apt install jq)."
fi
if ! hash yq 2>/dev/null; then
  die "This script needs yq (pip3 install yq)."
fi
if [[ -z "${CLOUD_ROBOTICS_CTX}" ]]; then
  # Fixed message: "needs specify" -> "needs to specify", "shoudl" -> "should",
  # and moved the trailing period inside the quotes.
  die "CLOUD_ROBOTICS_CTX needs to specify the cluster context that should be backed up."
fi

# Dump robot resources, stripping server-managed metadata fields so the output
# can be re-applied to a cluster later.
kc get robots -o yaml | \
  yq 2>/dev/null -ry '.items[] | del(.metadata.annotations["kubectl.kubernetes.io/last-applied-configuration"],.metadata.creationTimestamp,.metadata.generation,.metadata.managedFields,.metadata.resourceVersion,.metadata.selfLink,.metadata.uid,.status)' -
echo "---"
# Dump the token-vendor-managed configmaps (presumably robot public keys —
# verify against the token vendor) the same way.
kc get cm -n app-token-vendor -o yaml -l app.kubernetes.io/managed-by=token-vendor | \
  yq 2>/dev/null -ry '.items[] | del(.metadata.creationTimestamp,.metadata.resourceVersion,.metadata.selfLink,.metadata.uid)' -
================================================
FILE: scripts/check-images.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Run with 'json' or 'text' as a first arg to select the format.
#
# If you don't have the API enabled run (and wait a day to get results):
# gcloud --project ${GCP_PROJECT_ID} services enable containeranalysis.googleapis.com
#
# Postprocessing examples:
# - grep "Critical" /tmp/cve-check.demo.txt | sed -e 's/ Critical (\([0-9]*\)):/\1/g' | paste -s -d+ | bc
#
# Almost all of our images are built with distroless (bazel xxx_image rules). If there are
# vulnerabilities,
# 1.) check that we are using an up-to-date rules_docker in WORKSPACE. Check upstream
# for recent commits that update the distroless base images and if that does not help
# 2.) check https://github.com/GoogleContainerTools/distroless/commits/master
# for fixes, if there are some, clone rules_docker, run ./update_deps.sh and sent a PR.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${DIR}/common.sh"
source "${DIR}/include-config.sh"
# Print vulnerability reports for every image in the registry as one JSON array.
function json {
  local emitted=0
  local image
  echo "["
  for image in $(gcloud container images list --format='csv[no-heading](name)' --repository=${CLOUD_ROBOTICS_CONTAINER_REGISTRY}); do
    # Emit a comma before every element except the first. If the previous
    # describe call failed, nothing was printed for it, so the comma is
    # skipped for the next element as well.
    if (( emitted )); then
      echo ","
    else
      emitted=1
    fi
    gcloud --project ${GCP_PROJECT_ID} alpha container images describe --show-package-vulnerability --format=json ${image}:${DOCKER_TAG} || \
      emitted=0
  done
  echo "]"
}
# Print a human-readable vulnerability report for every image in the registry.
function text {
  local img
  for img in $(gcloud container images list --format='csv[no-heading](name)' --repository=${CLOUD_ROBOTICS_CONTAINER_REGISTRY}); do
    # Filter noise
    gcloud --project ${GCP_PROJECT_ID} alpha container images describe --show-package-vulnerability ${img}:${DOCKER_TAG} \
      | egrep -v '^\s*(registry|repository|digest):'
  done
}
# $1 selects the output format (the "json" or "text" function above);
# $2 is the project config handed to include_config.
if [[ -z "$2" ]]; then
  # NOTE(review): the placeholders in this usage string were apparently lost
  # during extraction; reconstructed from the argument handling below.
  die "Usage: $0 {json|text} <config>"
fi
include_config "$2"
# call arguments verbatim: "$1" names the function to invoke.
"$@"
================================================
FILE: scripts/common.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Print an error message to stderr and terminate the script with exit code 1.
function die {
  local message="$1"
  echo "${message}" >&2
  exit 1
}
# Succeeds (returns 0) when running from a source checkout.
function is_source_install {
  # This file is present in the root folder only when installing from a binary.
  local marker
  marker="$(dirname "${BASH_SOURCE[0]}")/../INSTALL_FROM_BINARY"
  ! [[ -e "${marker}" ]]
}
# Write a structured deploy-log entry to Cloud Logging for the given project.
#
# NOTE(review): the body below appears to have been garbled during extraction.
# The heredoc feeding `gcloud logging write` (likely `"$(cat <<EOF ... EOF)"`)
# and the start of a separate get-credentials helper seem to have been fused at
# the `"$(cat </dev/null) || saved_ctx=""` line; the variables ${name},
# ${region} and ${zone} used further down are never assigned in what is
# visible here. Verify against the upstream repository before editing.
function log {
  local project
  project=$1
  shift
  gcloud logging write cloud-robotics-deploy \
  --severity=INFO \
  --project=${project} \
  --payload-type=json \
  "$(cat </dev/null) || saved_ctx=""
  # Restore the caller's kubectl context when the function returns.
  trap "[[ -n \"${saved_ctx}\" ]] && kubectl config use-context \"${saved_ctx}\"; trap - RETURN" RETURN
  local location
  location=$(gcloud container clusters list --filter="name=${name}" --format='value(location)' --project="${project}")
  # Regional and zonal clusters need different gcloud flags.
  case "${location}" in
  ${region})
  gcloud container clusters get-credentials "${name}" \
  --region "${region}" \
  --project "${project}" \
  ;;
  ${zone})
  gcloud container clusters get-credentials "${name}" \
  --zone "${zone}" \
  --project "${project}" \
  ;;
  esac
}
# Build GKE context name for existing cluster.
#
# $1: GCP project id
# $2: cluster name
# $3: region
# $4: zone
# Prints "gke_<project>_<location>_<name>" when the cluster's location matches
# the given region or zone; prints nothing otherwise.
function gke_context_name {
  local project
  project="$1"
  # Fix: the original declared "cluster_name" as local but then assigned the
  # undeclared variable "name", leaking it into the caller's scope.
  local name
  name="$2"
  local region
  region="$3"
  local zone
  zone="$4"
  local location
  location=$(gcloud container clusters list --filter="name=${name}" --format='value(location)' --project="${project}")
  if [[ "${location}" == "${zone}" || "${location}" == "${region}" ]]; then
    echo "gke_${project}_${location}_${name}"
  fi
}
# Shorthand: kubectl pinned to the cloud-robotics cluster context
# (${CLOUD_ROBOTICS_CTX}); all arguments are forwarded verbatim.
function kc {
  kubectl "--context=${CLOUD_ROBOTICS_CTX}" "$@"
}
================================================
FILE: scripts/config.sh
================================================
#!/bin/bash
#
# Copyright 2024 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Backslash-escapes every character outside the shell-safe set
# [a-zA-Z0-9,._+@%/-], e.g. "foo bar" -> "foo\ bar".
function escape {
  printf '%s\n' "$*" | sed 's/[^a-zA-Z0-9,._+@%/-]/\\&/g'
}
# Escapes the input twice, for values that pass through two rounds of
# unescaping (e.g. a sed replacement): "foo bar" -> "foo\\\ bar".
function double_escape {
  printf '%s\n' "$*" | sed 's/[^a-zA-Z0-9,._+@%/-]/\\\\\\&/g'
}
# Creates a substitution command for sed ("s<SEP><regexp><SEP><replacement><SEP>")
# using the unprintable byte \001 as separator. This allows the caller to use
# any normal character (including "/") in the pattern and replacement.
function sed_pattern {
  local regexp="$1"
  local replacement="$2"
  local sep=$'\001'
  # Print the assembled command as one quoted word: the previous unquoted
  # `echo` was subject to word splitting and pathname expansion, so a "*" in
  # the pattern (as used by save_variable) could glob-expand against files in
  # the current directory.
  printf '%s\n' "s${sep}${regexp}${sep}${replacement}${sep}"
}
# Sets the variable ${name} to ${value} in the given config file.
#
# Behavior depends on the current file content:
#  - empty ${value}: an existing "name=..." line is commented out;
#  - an existing (possibly commented-out) assignment: replaced in place;
#  - otherwise: a new assignment is appended at the end of the file.
function save_variable {
  local config_file="$1"
  local name="$2"
  local value="$3"
  if [[ -z "${value}" ]]; then
    # Comment out "name=..." so any default applies again.
    sed -i "s/^\(${name}=.*\)$/#\1/" "${config_file}"
  elif grep -q "^\(# *\)\{0,1\}${name}=" "${config_file}"; then
    # Replace an existing assignment, also when it is commented out ("# name=").
    # The value is injected into a sed replacement, hence the double escape.
    value=$( double_escape ${value} )
    sed -i "$( sed_pattern "^\(# *\)\{0,1\}${name}=.*$" "${name}=${value}" )" "${config_file}"
  else
    # Append a new assignment, preceded by a blank line.
    value=$( escape ${value} )
    echo >>"${config_file}"
    echo "${name}=${value}" >>"${config_file}"
  fi
}
================================================
FILE: scripts/include-config.sh
================================================
#!/usr/bin/env bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Includes the configuration variables from a config.sh.
# Dies unless every named variable is set to a non-empty value.
#
# Arguments: one or more variable *names*, dereferenced via ${!v}.
function check_vars_not_empty {
  # Fixed: "v" was not declared local and leaked into the caller's scope.
  local v
  for v in "$@"; do
    [ -n "${!v}" ] || die "Variable $v is not set or is empty"
  done
}
# Dies unless the named variable holds one of the allowed values.
#
# Arguments: variable *name* (dereferenced via ${!var_name}), followed by the
# allowed values.
function check_var_is_one_of {
  local var_name="$1"
  local allowed_values="${*:2}"
  local candidate
  for candidate in ${allowed_values}; do
    if [[ "${!var_name}" = "${candidate}" ]]; then
      return 0
    fi
  done
  die "Variable ${var_name} has to be one of [${allowed_values}], but was ${!var_name}"
}
# Loads the per-project configuration from GCS into the current shell.
#
# Sources gs://<project>-cloud-robotics-config/config.sh, validates that the
# required variables are set, and fills in defaults for the optional ones.
# Terminates the script (via the check_* helpers) on invalid configuration.
function include_config {
  local project="$1"
  source <(gcloud storage cat "gs://${project}-cloud-robotics-config/config.sh")
  # Check that config defines the following set of configuration variables
  check_vars_not_empty GCP_PROJECT_ID GCP_REGION GCP_ZONE
  if is_source_install; then
    # Keep default in sync with src/go/pkg/configutil/config-reader.go
    CLOUD_ROBOTICS_CONTAINER_REGISTRY=${CLOUD_ROBOTICS_CONTAINER_REGISTRY:-"gcr.io/${GCP_PROJECT_ID}"}
    SOURCE_CONTAINER_REGISTRY=${CLOUD_ROBOTICS_CONTAINER_REGISTRY}
  else
    # Binary installs pull prebuilt images from the public release registry.
    SOURCE_CONTAINER_REGISTRY=${SOURCE_CONTAINER_REGISTRY:-gcr.io/cloud-robotics-releases}
  fi
  CLOUD_ROBOTICS_DEPLOY_ENVIRONMENT=${CLOUD_ROBOTICS_DEPLOY_ENVIRONMENT:-GCP}
  check_var_is_one_of CLOUD_ROBOTICS_DEPLOY_ENVIRONMENT "GCP" "GCP-testing"
  GKE_CLUSTER_TYPE=${GKE_CLUSTER_TYPE:-zonal}
  check_var_is_one_of GKE_CLUSTER_TYPE "zonal" "regional"
  GKE_DATAPATH_PROVIDER=${GKE_DATAPATH_PROVIDER:-DATAPATH_PROVIDER_UNSPECIFIED}
  check_var_is_one_of GKE_DATAPATH_PROVIDER "DATAPATH_PROVIDER_UNSPECIFIED" "ADVANCED_DATAPATH"
}
================================================
FILE: scripts/migrate.sh
================================================
#!/bin/bash
#
# Copyright 2025 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# helper functions to update from older installations
# ./migrate.sh
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${DIR}/common.sh"
source "${DIR}/config.sh"
source "${DIR}/include-config.sh"
set -o pipefail -o errexit
# Required or terraform will fail deleting the IoT registry
#
# Deletes all devices from the "cloud-robotics" Cloud IoT registry. Returns
# early (no-op) when the Cloud IoT API is not enabled or the registry does not
# exist in this project.
function cleanup_iot_devices {
  gcloud services list --project="${GCP_PROJECT_ID}" | grep -q cloudiot.googleapis.com || return
  local iot_registry_name="cloud-robotics"
  gcloud beta iot registries list --project="${GCP_PROJECT_ID}" --region="${GCP_REGION}" | grep -q "${iot_registry_name}" || return
  # Collect device ids, then delete them one by one.
  local devices
  devices=$(gcloud beta iot devices list \
    --project "${GCP_PROJECT_ID}" \
    --region "${GCP_REGION}" \
    --registry "${iot_registry_name}" \
    --format='value(id)')
  if [[ -n "${devices}" ]] ; then
    echo "Clearing IoT devices from ${iot_registry_name}" 1>&2
    for dev in ${devices}; do
      gcloud beta iot devices delete \
        --quiet \
        --project "${GCP_PROJECT_ID}" \
        --region "${GCP_REGION}" \
        --registry "${iot_registry_name}" \
        ${dev}
    done
  fi
}
# Delete all legacy HELM resources. Do not delete the Helm charts directly, as
# we just want to keep the resources and have synk "adopt" them.
function cleanup_helm_data {
  local cm
  for cm in ready-for-synk synk-enabled; do
    kc delete cm "${cm}" 2> /dev/null || true
  done
  # Remove the Tiller deployment/service and its release configmaps.
  local kind
  for kind in deploy service; do
    kc -n kube-system delete "${kind}" tiller-deploy 2> /dev/null || true
  done
  kc -n kube-system delete cm -l OWNER=TILLER 2> /dev/null || true
}
# Uninstalls and cleans up older versions of cert-manager if needed.
#
# Detects the installed version from the deployment's "helm.sh/chart" label
# and applies the documented migration steps for 0.5.x, 0.8.x and 0.10.x.
# NOTE(review): relies on ${SYNK} pointing to the synk binary — set by the
# caller's environment; confirm before running standalone.
function cleanup_old_cert_manager {
  # Uninstall and cleanup older versions of cert-manager if needed
  echo "checking for old cert manager .."
  # Nothing to do when no cert-manager deployment exists.
  kc &>/dev/null get deployments cert-manager || return 0
  # "cert-manager-v0.8.1" -> "0.8.1": take the last dash-separated field and
  # strip the leading "v"/"V".
  installed_ver=$(kc get deployments cert-manager -o=go-template --template='{{index .metadata.labels "helm.sh/chart"}}' | rev | cut -d'-' -f1 | rev | tr -d "vV")
  echo "have cert manager $installed_ver"
  if [[ "$installed_ver" == 0.5.* ]]; then
    echo "need to cleanup old version"
    # see https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.5-0.6.html#upgrading-from-older-versions-using-helm
    # and https://docs.cert-manager.io/en/latest/tasks/backup-restore-crds.html
    # cleanup
    synk_version=$(kc get resourcesets.apps.cloudrobotics.com --output=name | grep cert-manager | cut -d'/' -f2)
    echo "deleting resourceset ${synk_version}"
    ${SYNK} delete ${synk_version} -n default
    kc delete crd \
      certificates.certmanager.k8s.io \
      issuers.certmanager.k8s.io \
      clusterissuers.certmanager.k8s.io
  fi
  if [[ "$installed_ver" == 0.8.* ]]; then
    echo "need to cleanup old version"
    # see https://cert-manager.io/docs/installation/upgrading/upgrading-0.8-0.9/
    # and https://cert-manager.io/docs/installation/upgrading/upgrading-0.9-0.10/
    # cleanup
    kc delete deployments --namespace default \
      cert-manager \
      cert-manager-cainjector \
      cert-manager-webhook
    kc delete -n default issuer cert-manager-webhook-ca cert-manager-webhook-selfsign
    kc delete -n default certificate cert-manager-webhook-ca cert-manager-webhook-webhook-tls
    kc delete apiservice v1beta1.admission.certmanager.k8s.io
  fi
  if [[ "$installed_ver" == 0.10.* ]]; then
    echo "need to cleanup old version"
    # cleanup deployments
    kc delete deployments --namespace default \
      cert-manager \
      cert-manager-cainjector \
      cert-manager-webhook
    echo "Wait until cert-manager pods are deleted"
    kc wait pods -l app.kubernetes.io/instance=cert-manager -n default --for=delete --timeout=35s
    # delete existing cert-manager resources
    kc delete Issuers,ClusterIssuers,Certificates,CertificateRequests,Orders,Challenges --all-namespaces --all
    # Delete old webhook ca and tls secrets
    kc delete secrets --namespace default cert-manager-webhook-ca cert-manager-webhook-tls
    # cleanup crds
    kc delete crd \
      certificaterequests.certmanager.k8s.io \
      certificates.certmanager.k8s.io \
      challenges.certmanager.k8s.io \
      clusterissuers.certmanager.k8s.io \
      issuers.certmanager.k8s.io \
      orders.certmanager.k8s.io
    # cleanup apiservices
    kc delete apiservices v1beta1.webhook.certmanager.k8s.io
  fi
  # This is now installed as part of base-cloud
  kc delete resourcesets.apps.cloudrobotics.com -l name=cert-manager 2>/dev/null || true
}
# main
#
# Validates the requested migration step and the argument count, then loads
# the project config, logs the invocation, and dispatches to the function
# named by $1 ("$@" calls it with the remaining arguments).
if [[ "$#" -lt 2 ]] || [[ ! "$1" =~ ^(cleanup_helm_data|cleanup_old_cert_manager|cleanup_iot_devices)$ ]]; then
  # Fixed: the usage message was missing the required <project-id> argument.
  die "Usage: $0 {cleanup_helm_data|cleanup_old_cert_manager|cleanup_iot_devices} <project-id>"
fi
include_config $2
# log and call arguments verbatim:
log $2 $0 $1
"$@"
================================================
FILE: scripts/pre-commit
================================================
#!/bin/bash
# git hook to ensure code style
# ln -sf ../../scripts/pre-commit .git/hooks/
# shellcheck disable=2044,2046
# This script can't handle spaces in filenames. That would be challenging to do
# correctly, and we will hopefully never add a filename with a space to the
# repository.
# Propagate failures through pipelines; individual checks still continue and
# accumulate their verdicts in $result.
set -o pipefail
result=0
# Allow to call the pre-commit hook with a list of files. This allows to run the
# script from the command line like this to check all files:
#   $ ./scripts/pre-commit $(git ls-files)
files="$*"
if [ -z "$files" ]; then
  # Default: the files staged for commit (the diff-filter lists all but deleted files).
  files="$(git diff --name-only --staged --diff-filter=ACMRTUXB)"
fi
# Filters the global $files list by regex.
#
# Arguments: an include regex (grep -E) and an optional exclude regex.
# The final `echo` flattens the matches onto one line for easier copy-pasting.
function files_matching {
  local include="$1"
  local exclude="$2"
  local matched
  matched=$(echo "$files" | tr ' ' '\n' | grep -E "${include}")
  if [[ -n "${exclude}" ]]; then
    matched=$(echo "${matched}" | grep -vE "${exclude}")
  fi
  echo ${matched}
}
# Check Go formatting with gofmt; print the diff and a fix hint on mismatch.
go_files=$(files_matching "\.go$")
if [ -n "$go_files" ]; then
  # meh, gofmt does not set an exit code
  # TODO(rodrigoq): this will break if the filenames have spaces
  diff=$(gofmt -d -e $go_files)
  if [ -n "$diff" ]; then
    echo "$diff"
    files_to_fix=$(gofmt -l $go_files)
    echo "To fix, run: gofmt -w $files_to_fix"
    result=1
  fi
fi
# Check Python formatting with autopep8.
py_files=$(files_matching "\.py$")
if [ -n "$py_files" ]; then
  # Fixed: this used a subshell `(echo ...; exit 1)`, whose `exit` only left
  # the subshell — the script then ran the missing tool anyway. A brace group
  # makes the `exit` abort the hook as intended.
  which >/dev/null autopep8 || { echo "Please install autopep8"; exit 1; }
  # TODO(rodrigoq): this will break if the filenames have spaces
  diff=$(autopep8 -d $py_files)
  if [ -n "$diff" ]; then
    echo "$diff"
    echo "To fix, run: autopep8 -i $py_files"
    result=1
  fi
fi
# Check Bazel BUILD/WORKSPACE/*.bzl formatting with buildifier.
build_files=$(echo "$files" | tr ' ' '\n' \
  | grep -E "BUILD|WORKSPACE|[.]bzl")
if [ -n "$build_files" ]; then
  # Fixed: brace group instead of a subshell, so the `exit` actually aborts
  # the hook when buildifier is missing.
  which >/dev/null buildifier || { echo "Please install buildifier"; exit 1; }
  diff=$(buildifier -d $build_files)
  if [ -n "$diff" ]; then
    echo "$diff"
    echo "To fix, run: buildifier" \
      $(echo $(buildifier -mode=check $build_files | cut -d' ' -f1))
    result=1
  fi
fi
# Run Gazelle if a Go or BUILD file changes. This is a heuristic, but hopefully
# covers most cases where it is needed.
for workspace_dir in $(find -name WORKSPACE -printf "%h\n"); do
  bzl="$workspace_dir/BUILD.bazel"
  # Only workspaces that declare a gazelle() target get checked.
  if [ -e "$bzl" ] && grep -q "gazelle(" "$bzl"; then
    # This calls gazelle for every workspace, affected or not.
    if [[ -n "$go_files" || -n "$build_files" ]]; then
      # -mode=diff prints the BUILD file changes gazelle would make.
      diff=$(cd ${workspace_dir} && bazel run :gazelle -- -mode=diff 2>/dev/null)
      if [ -n "$diff" ]; then
        echo "$diff"
        echo "To fix:"
        echo " bazel run :gazelle"
        result=1
      fi
    fi
  fi
done
# Check TypeScript formatting with clang-format.
ts_files=$(files_matching "\.ts$")
if [ -n "$ts_files" ]; then
  # Fixed: brace group instead of a subshell, so the `exit` actually aborts
  # the hook when clang-format is missing.
  which >/dev/null clang-format || { echo "Please install clang-format"; exit 1; }
  diff=$(diff -u <(cat $ts_files) <(clang-format $ts_files))
  if [ -n "$diff" ]; then
    echo "$diff"
    echo "To fix, run: clang-format -i $ts_files"
    result=1
  fi
fi
# Check C++ formatting with clang-format using the Google style.
cpp_files=$(files_matching "\.(h|cc)$")
if [ -n "$cpp_files" ]; then
  # Fixed: brace group instead of a subshell, so the `exit` actually aborts
  # the hook when clang-format is missing.
  which >/dev/null clang-format || { echo "Please install clang-format"; exit 1; }
  diff=$(diff -u <(cat $cpp_files) <(clang-format -style=google $cpp_files))
  if [ -n "$diff" ]; then
    echo "$diff"
    echo "To fix, run: clang-format -style=google -i $cpp_files"
    result=1
  fi
fi
# Check Terraform formatting with "terraform fmt", once per affected directory.
tf_files=$(files_matching "\.tf$")
if [ -n "$tf_files" ]; then
  # Pick the terraform binary depending on which repo we are running in.
  if [[ -f WORKSPACE ]] ; then
    # cloud-robotics
    bazel build @hashicorp_terraform//:terraform
    TERRAFORM="${PWD}/bazel-out/../../../external/hashicorp_terraform/terraform"
  else
    # infrastructure
    TERRAFORM="/google/data/ro/teams/terraform/bin/terraform"
  fi
  tf_dirs=$(dirname $tf_files | sort | uniq)
  for tf_dir in $tf_dirs; do
    # First a silent check; on failure, print the diff and a fix hint.
    if ! ${TERRAFORM} fmt -write=false -list=false -check=true $tf_dir; then
      ${TERRAFORM} fmt -write=false -list=false -diff=true $tf_dir
      echo "To fix, run: ${TERRAFORM} fmt $tf_dir"
      result=1
    fi
  done
fi
# Run check when either a markdown file changes (it may have new embeddings) or
# an example file changes (it may have to be embedded).
# TODO(rodrigoq): only run if these files contain an embedmd tag.
md_files=$(files_matching "\.md$")
# TODO(rodrigoq): find a better way of checking if a file is embedded anywhere.
example_files=$(files_matching "example")
if [[ -n "$md_files" || -n "$example_files" ]]; then
  # Use embedmd from GOPATH (default ~/go); install it on demand.
  EMBEDMD=${GOPATH:-$HOME/go}/bin/embedmd
  if [[ ! -f "$EMBEDMD" ]] && ! go install github.com/campoy/embedmd@latest ; then
    echo "ERROR: embedmd not found and couldn't be installed." >&2
    result=1
  else
    # An unchanged .md file may still have changes in the embedded files.
    all_md_files=$(git ls-files | grep --color=never '\.md$')
    diff=$($EMBEDMD -d $all_md_files)
    # embedmd -d exits non-zero when any embedding is out of date.
    if [[ $? -ne 0 ]] ; then
      echo "$diff"
      echo "To fix, run: $EMBEDMD -w \$(git ls-files | grep --color=never '\\.md$')"
      result=1
    fi
  fi
fi
# Lint shell scripts with a pinned shellcheck release, cached under ~/.cache.
SHELLCHECK_DIR="$HOME/.cache/cloud-robotics"
SHELLCHECK="${SHELLCHECK_DIR}/shellcheck-v0.6.0/shellcheck"
sh_files=$(files_matching "(\.sh$|pre-commit$)" "deployments/.*/config.sh")
# TODO(rodrigoq): enable shellcheck in the apps repo.
if [[ -n "$sh_files" && "$(basename "$PWD")" != "apps" ]] ; then
  # Download the pinned shellcheck binary on first use.
  if [[ ! -e "$SHELLCHECK" ]] ; then
    mkdir -p "$SHELLCHECK_DIR"
    curl -fsSL https://github.com/koalaman/shellcheck/releases/download/v0.6.0/shellcheck-v0.6.0.linux.x86_64.tar.xz \
      | tar -C "$SHELLCHECK_DIR" -xJf - || exit 1
  fi
  # Note: using a lower severity than `warning` is quite noisy.
  # SC1090 complains not being able to follow `source "${DIR}/scripts/common.sh"`,
  # which is a common pattern and we don't benefit from shellcheck following the
  # `source` statement.
  if ! "$SHELLCHECK" --severity=warning -e SC1090 --external-sources $sh_files ; then
    echo "ERROR: shellcheck found issues. These need to be fixed manually." >&2
    result=1
  fi
fi
# Non-zero when any of the checks above flagged a problem.
exit $result
================================================
FILE: scripts/robot-sim.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Manage simulated robots
#
# Simulated robots are implemented as a separate cluster, running the same
# components like a physical robot in addition to the robot simulator.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${DIR}/common.sh"
source "${DIR}/include-config.sh"
set -o pipefail -o errexit
# Loads the project config and fills in robot-sim defaults.
# ROBOT_LABELS defaults to "simulated=true" when unset or empty.
function set_defaults {
  local GCP_PROJECT_ID="$1"
  include_config "${GCP_PROJECT_ID}"
  ROBOT_LABELS="${ROBOT_LABELS:-simulated=true}"
}
# Creates a simulated robot: a GKE cluster named after the robot, running the
# same components as a physical robot.
#
# Arguments: project id, robot name, optional robot type (default "mir-100").
function create {
  local GCP_PROJECT_ID="$1"
  local ROBOT_NAME="$2"
  local ROBOT_TYPE="${3:-mir-100}"
  set_defaults "${GCP_PROJECT_ID}"
  local GKE_SIM_CONTEXT="gke_${GCP_PROJECT_ID}_${GCP_ZONE}_${ROBOT_NAME}"
  # Create cloud cluster for robot simulation unless already exists.
  # To more accurately simulate a robot cluster, this uses the
  # robot-service@ service account instead of enabling Workload Identity, as we
  # don't have any on-prem/robot equivalent to that.
  gcloud >/dev/null 2>&1 container clusters describe "${ROBOT_NAME}" \
    --zone=${GCP_ZONE} --project=${GCP_PROJECT_ID} || \
  gcloud container clusters create "${ROBOT_NAME}" \
    --enable-legacy-authorization \
    --machine-type="e2-standard-2" \
    --num-nodes=1 \
    --max-nodes=2 \
    --enable-ip-alias \
    --issue-client-certificate \
    --no-enable-basic-auth \
    --metadata disable-legacy-endpoints=true \
    --scopes gke-default,cloud-platform \
    --service-account "robot-service@${GCP_PROJECT_ID}.iam.gserviceaccount.com" \
    --zone=${GCP_ZONE} \
    --project=${GCP_PROJECT_ID}
  gke_get_credentials "${GCP_PROJECT_ID}" "${ROBOT_NAME}" "${GCP_REGION}" "${GCP_ZONE}"
  # Extract the cluster's pod CIDR; it is passed on to setup_robot.sh.
  POD_CIDR=$(gcloud container clusters describe "${ROBOT_NAME}" \
    --project=${GCP_PROJECT_ID} \
    --zone=${GCP_ZONE} \
    | grep podIpv4CidrBlock | awk '{print $2;}')
  # Run the regular robot bootstrap against the simulation cluster.
  # shellcheck disable=2097 disable=2098
  KUBE_CONTEXT=${GKE_SIM_CONTEXT} \
  HOST_HOSTNAME="nic0.${ROBOT_NAME}${GCP_ZONE}.c.${GCP_PROJECT_ID}.internal.gcpnode.com" \
  ACCESS_TOKEN=$(gcloud auth application-default print-access-token) \
  $DIR/../src/bootstrap/robot/setup_robot.sh \
    ${ROBOT_NAME} \
    --project ${GCP_PROJECT_ID} \
    --robot-type "${ROBOT_TYPE}" \
    --fluentd=false \
    --fluentbit=false \
    --running-on-gke=true \
    --pod-cidr "${POD_CIDR}" \
    --labels "${ROBOT_LABELS}"
}
# Deletes a simulated robot: removes its Robot resource from the cloud
# cluster, then deletes the simulation GKE cluster.
#
# Arguments: project id, robot name.
function delete {
  local GCP_PROJECT_ID="$1"
  local ROBOT_NAME="$2"
  set_defaults "${GCP_PROJECT_ID}"
  # "|| true": the Robot resource may already be gone.
  kubectl --context=${CLOUD_ROBOTICS_CTX} delete robots.registry.cloudrobotics.com "${ROBOT_NAME}" || true
  gcloud container clusters delete "${ROBOT_NAME}" \
    --zone=${GCP_ZONE} --project=${GCP_PROJECT_ID}
}
# Alias for create (create skips cluster creation when the cluster already
# exists, so re-running it updates an existing simulated robot).
function update {
  create "$@"
}
# main
#
# Dispatches to create/delete/update with the remaining arguments.
if [[ "$#" -lt 3 ]]; then
  # Fixed: the usage message was missing the positional-argument placeholders.
  die "Usage: $0 {create|delete|update} <project-id> <robot-name> [<robot-type>]"
fi
# call arguments verbatim:
"$@"
================================================
FILE: scripts/set-config.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(skopecki)
# * Consider setting CLOUD_ROBOTICS_SHARED_OWNER_GROUP and
# APP_MANAGEMENT as well.
set -o pipefail -o errexit
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
source "${DIR}/scripts/common.sh"
source "${DIR}/scripts/config.sh"
# Reads a variable from user input.
#
# Arguments: name of the target variable, question to display, optional
# default used when the user just presses [ENTER]. The chosen value is
# shell-escaped and assigned to the target variable via eval.
function read_variable {
  local target_var="$1"
  local question="$2"
  local default="$3"
  echo
  echo "${question}"
  if [[ -n "${default}" ]]; then
    echo -n "[ENTER] for \"${default}\": "
  fi
  read -er input
  if [[ -z "${input}" ]]; then
    # Empty input: fall back to the default.
    # shellcheck disable=SC2046
    eval ${target_var}=$( escape ${default} )
  else
    # shellcheck disable=SC2046
    eval ${target_var}=$( escape ${input} )
  fi
}
# Prints "description: value" to the user; prints nothing for an empty value.
function print_variable {
  local description="$1"
  local value="$2"
  if [[ -z "${value}" ]]; then
    return 0
  fi
  echo "${description}: ${value}"
}
# Asks a yes/no question and returns the mapped input.
#
# Arguments: question text and a default answer ("y" or "n"); pressing [ENTER]
# selects the default. Returns 0 for yes, 1 for no; re-prompts on other input.
function ask_yn {
  local question="$1"
  local default="$2"
  echo
  echo -n "$question"
  if [[ "${default}" = "n" ]]; then
    echo -n " [yN] "
  else
    echo -n " [Yn] "
  fi
  while true; do
    # Read a single keypress; empty input means [ENTER] was pressed.
    read -n 1 input
    if [[ -z "${input}" ]]; then
      if [[ "${default}" = "n" ]]; then
        return 1
      else
        return 0
      fi
    fi
    echo
    if [[ "${input}" =~ y|Y ]]; then
      return 0
    elif [[ "${input}" =~ n|N ]]; then
      return 1
    fi
    echo -n "Please answer with 'y' or 'n'. "
  done
}
# Parse flags.
# A leading non-flag argument is the GCP project id.
if [[ ! "$1" = --* ]]; then
  GCP_PROJECT_ID="$1"
fi
for arg in "$@"; do
  if [[ "${arg}" = "--ensure-config" ]]; then
    FLAG_ENSURE_CONFIG=1
  elif [[ "${arg}" = "--edit-oauth" ]]; then
    FLAG_EDIT_OAUTH=1
  fi
done
if [[ -z "${GCP_PROJECT_ID}" ]]; then
  echo
  # Fixed: the usage line was missing the argument placeholders.
  echo "Usage: $0 <project-id> [<options>]"
  echo "Supported options:"
  echo " --ensure-config Does nothing if a config exists already."
  echo " --edit-oauth Enables and configures OAuth."
  die
fi
# Load config if it exists. The config is edited in a temp file that is
# uploaded back to the bucket at the end and removed on exit.
CLOUD_BUCKET="gs://${GCP_PROJECT_ID}-cloud-robotics-config"
CONFIG_FILE="$(mktemp)"
trap '{ rm -f ${CONFIG_FILE}; }' EXIT
if gcloud storage cp "${CLOUD_BUCKET}/config.sh" "${CONFIG_FILE}" 2>/dev/null; then
  if [[ -n "${FLAG_ENSURE_CONFIG}" ]]; then
    echo "Found Cloud Robotics config."
    exit 0
  fi
  source ${CONFIG_FILE}
else
  if [[ -n "${FLAG_EDIT_OAUTH}" ]]; then
    die "You have to create a config before you can enable OAuth."
  fi
  # No config yet: start from the template.
  cp ${DIR}/config.sh.tmpl ${CONFIG_FILE}
fi
# Check that the project exists and we have access.
gcloud projects describe "${GCP_PROJECT_ID}" >/dev/null \
  || die "ERROR: unable to access Google Cloud project: ${GCP_PROJECT_ID}"
# Interactively collects the main deployment configuration (zone, cluster
# type, Terraform state location, registries, certificate provider) into
# global variables.
function set_default_vars {
  # Enable Compute Engine API which is necessary to validate the zones.
  if ! gcloud services list --enabled --project ${GCP_PROJECT_ID} \
      | grep "^compute.googleapis.com \+" >/dev/null; then
    # TODO(skopecki) This can take a minute. Find a better solution to verify compute zones.
    echo "Enabling Compute Engine API..."
    gcloud services enable compute.googleapis.com --project ${GCP_PROJECT_ID}
  fi
  # Ask for region and zone.
  GCP_ZONE=${GCP_ZONE:-"europe-west1-c"}
  read_variable GCP_ZONE "In which zone should Cloud Robotics be deployed?" "${GCP_ZONE}"
  # Verify the zone exists.
  gcloud compute zones list -q --project "${GCP_PROJECT_ID}" --uri | grep -q "zones/${GCP_ZONE}$" \
    || die "ERROR: the zone does not exist in your project: ${GCP_ZONE}"
  # Derive the region by stripping the zone suffix (e.g. "-c").
  GCP_REGION=${GCP_ZONE%-?}
  # Ask for gke cluster type
  GKE_CLUSTER_TYPE="zonal"
  while :; do
    read_variable GKE_CLUSTER_TYPE "Should the cluster be 'zonal' or 'regional'?" "${GKE_CLUSTER_TYPE}"
    if [[ "${GKE_CLUSTER_TYPE}" == "zonal" || "${GKE_CLUSTER_TYPE}" == "regional" ]]; then
      break
    fi
    echo "Value must be one of: 'zonal','regional'"
  done
  # Use dataplane_v2 for all new projects
  GKE_DATAPATH_PROVIDER="ADVANCED_DATAPATH"
  # Ask for Terraform bucket and location.
  OLD_TERRAFORM_GCS_BUCKET="${TERRAFORM_GCS_BUCKET}"
  OLD_TERRAFORM_GCS_PREFIX="${TERRAFORM_GCS_PREFIX}"
  if [[ -z "${TERRAFORM_GCS_BUCKET}" ]]; then
    OLD_TF_LOCATION="${CLOUD_BUCKET}/terraform"
  else
    OLD_TF_LOCATION="gs://${TERRAFORM_GCS_BUCKET}/${TERRAFORM_GCS_PREFIX}"
  fi
  read_variable TF_LOCATION "In which GCP storage folder should your Terraform state be stored?" \
    "${OLD_TF_LOCATION}"
  # Split "gs://<bucket>/<prefix>" into bucket (group \2) and prefix (group \4).
  TF_LOCATION_REGEX='^\(gs://\)\?\([-_a-zA-Z0-9]\+\)\(\/\(.*[^/]\)\)\?/\?$'
  # Fixed: the error message was missing the <bucket>/<folder> placeholders.
  TERRAFORM_GCS_BUCKET=$( echo "${TF_LOCATION}" | sed "s#${TF_LOCATION_REGEX}#\2#;q" ) \
    || die "ERROR: Invalid GCP storage folder. Accepted format: gs://<bucket>/<folder>"
  TERRAFORM_GCS_PREFIX=$( echo "${TF_LOCATION}" | sed "s#${TF_LOCATION_REGEX}#\4#;q" )
  # Docker registries.
  if is_source_install; then
    read_variable CLOUD_ROBOTICS_CONTAINER_REGISTRY \
      "Which Docker registry do you want to use when installing from sources? Use \"default\" for gcr.io/${GCP_PROJECT_ID}." \
      "${CLOUD_ROBOTICS_CONTAINER_REGISTRY:-default}"
    if [[ "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}" == "default" ]]; then
      CLOUD_ROBOTICS_CONTAINER_REGISTRY=
    fi
  fi
  # TODO(skopecki) If CLOUD_ROBOTICS_CONTAINER_REGISTRY is private and does not belongs to this GCR project,
  # it could be added automatically to PRIVATE_DOCKER_PROJECTS.
  read_variable PRIVATE_DOCKER_PROJECTS \
    "Do you need to read private Docker images from a GCR project? Space-separated list of alphanumeric project ids, or \"none\" for none." \
    "${PRIVATE_DOCKER_PROJECTS:-none}"
  if [[ "${PRIVATE_DOCKER_PROJECTS}" == "none" ]]; then
    PRIVATE_DOCKER_PROJECTS=
  fi
  # Certificate provider
  set_certificate_provider_vars
}
# Interactively collects the OAuth client id/secret and (re)generates the
# cookie secret used by the OAuth proxy.
function set_oauth_vars {
  echo "Follow https://googlecloudrobotics.github.io/core/how-to/setting-up-oauth.html to obtain OAuth client id and secret."
  read_variable CLOUD_ROBOTICS_OAUTH2_CLIENT_ID "Enter OAuth client id." \
    "${CLOUD_ROBOTICS_OAUTH2_CLIENT_ID}"
  read_variable CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET "Enter OAuth client secret." \
    "${CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET}"
  # Generate a fresh random cookie secret when none exists yet, or on request.
  if [[ -z "${CLOUD_ROBOTICS_COOKIE_SECRET}" ]] ||\
      ask_yn "Generate new cookie secret?" "n"; then
    CLOUD_ROBOTICS_COOKIE_SECRET="$( head -c 16 /dev/urandom | base64 )"
  fi
}
# Interactively selects the certificate provider and, for providers other
# than lets-encrypt, collects the certificate subject configuration.
function set_certificate_provider_vars {
  CA_OPTIONS="lets-encrypt, google-cas"
  CA_DEFAULT="lets-encrypt"
  # Select provider
  read_variable CLOUD_ROBOTICS_CERTIFICATE_PROVIDER \
    "Select the certificate provider. Should be one of: ${CA_OPTIONS}." \
    "${CLOUD_ROBOTICS_CERTIFICATE_PROVIDER:-${CA_DEFAULT}}"
  # Request certificate configuration if the provider requires it.
  # The reversed =~ test checks whether the chosen provider occurs in the
  # (single-entry) list "lets-encrypt" of providers that need no extra config.
  if [[ ! "lets-encrypt" =~ (" "|^)"${CLOUD_ROBOTICS_CERTIFICATE_PROVIDER}"(" "|$) ]]; then
    set_certificate_vars
  fi
}
# Interactively collects the certificate subject fields (O, CN, optional OU).
function set_certificate_vars {
  echo "Configuring certificate information."
  echo "Refer to RFC 4519 for explanations of the fields: https://datatracker.ietf.org/doc/html/rfc4519#section-2"
  # O and CN default to the project id.
  read_variable CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION \
    "Organization (O)" \
    "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION:-${GCP_PROJECT_ID}}"
  read_variable CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME \
    "Common Name (CN)" \
    "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME:-${GCP_PROJECT_ID}}"
  read_variable CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATIONAL_UNIT \
    "(Optional) Organizational Unit (OU)" \
    "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATIONAL_UNIT}"
}
# Collect configuration: OAuth-only when --edit-oauth was given, otherwise the
# full default set.
if [[ -n "${FLAG_EDIT_OAUTH}" ]]; then
  set_oauth_vars
else
  set_default_vars
fi
# Output configuration before saving.
echo
echo " Your configuration"
echo "========================"
print_variable "GCP project ID" "${GCP_PROJECT_ID}"
print_variable "GCP region" "${GCP_REGION}"
print_variable "GCP zone" "${GCP_ZONE}"
print_variable "GKE cluster type" "${GKE_CLUSTER_TYPE}"
print_variable "GKE datapath provider" "${GKE_DATAPATH_PROVIDER}"
print_variable "Terraform state bucket" "${TERRAFORM_GCS_BUCKET}"
print_variable "Terraform state directory" "${TERRAFORM_GCS_PREFIX}"
print_variable "Docker container registry" "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}"
print_variable "Projects for private Docker images" "${PRIVATE_DOCKER_PROJECTS}"
print_variable "OAuth client id" "${CLOUD_ROBOTICS_OAUTH2_CLIENT_ID}"
print_variable "OAuth client secret" "${CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET}"
print_variable "OAuth cookie secret" "${CLOUD_ROBOTICS_COOKIE_SECRET}"
print_variable "Certificate provider" "${CLOUD_ROBOTICS_CERTIFICATE_PROVIDER}"
print_variable "Certificate Subject Organization (O)" "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION}"
print_variable "Certificate Subject Common Name (CN)" "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME}"
print_variable "Certificate Subject Organizational Unit (OU)" "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATIONAL_UNIT}"
if ! ask_yn "Would you like to save this configuration?"; then
  exit 0
fi
# If the Terraform state location changed, copy the state files first.
if [[ -n "${OLD_TERRAFORM_GCS_BUCKET}" &&\
    ( ! "${OLD_TERRAFORM_GCS_BUCKET}" = "${TERRAFORM_GCS_BUCKET}" ||\
      ! "${OLD_TERRAFORM_GCS_PREFIX}" = "${TERRAFORM_GCS_PREFIX}") ]]; then
  # Copy Terraform state to new location.
  echo "Copying Terraform state..."
  gcloud storage cp "gs://${OLD_TERRAFORM_GCS_BUCKET}/${OLD_TERRAFORM_GCS_PREFIX}/*.tfstate" \
    "gs://${TERRAFORM_GCS_BUCKET}/${TERRAFORM_GCS_PREFIX}/"
fi
# Save all parameter values.
echo
echo "Saving configuration..."
save_variable "${CONFIG_FILE}" GCP_PROJECT_ID "${GCP_PROJECT_ID}"
save_variable "${CONFIG_FILE}" GCP_REGION "${GCP_REGION}"
save_variable "${CONFIG_FILE}" GCP_ZONE "${GCP_ZONE}"
# The kubectl context name depends on the cluster type: regional clusters are
# keyed by region, zonal clusters by zone.
if [[ "${GKE_CLUSTER_TYPE}" == "regional" ]]; then
  save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CTX "gke_${GCP_PROJECT_ID}_${GCP_REGION}_cloud-robotics"
else
  save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CTX "gke_${GCP_PROJECT_ID}_${GCP_ZONE}_cloud-robotics"
fi
save_variable "${CONFIG_FILE}" GKE_CLUSTER_TYPE "${GKE_CLUSTER_TYPE}"
save_variable "${CONFIG_FILE}" GKE_DATAPATH_PROVIDER "${GKE_DATAPATH_PROVIDER}"
save_variable "${CONFIG_FILE}" TERRAFORM_GCS_BUCKET "${TERRAFORM_GCS_BUCKET}"
save_variable "${CONFIG_FILE}" TERRAFORM_GCS_PREFIX "${TERRAFORM_GCS_PREFIX}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CONTAINER_REGISTRY "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}"
save_variable "${CONFIG_FILE}" PRIVATE_DOCKER_PROJECTS "${PRIVATE_DOCKER_PROJECTS}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_OAUTH2_CLIENT_ID "${CLOUD_ROBOTICS_OAUTH2_CLIENT_ID}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET "${CLOUD_ROBOTICS_OAUTH2_CLIENT_SECRET}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_COOKIE_SECRET "${CLOUD_ROBOTICS_COOKIE_SECRET}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CERTIFICATE_PROVIDER "${CLOUD_ROBOTICS_CERTIFICATE_PROVIDER}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATION}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_COMMON_NAME}"
save_variable "${CONFIG_FILE}" CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATIONAL_UNIT "${CLOUD_ROBOTICS_CERTIFICATE_SUBJECT_ORGANIZATIONAL_UNIT}"
# Upload config to the cloud, creating the bucket if needed.
if ! gcloud -q storage buckets describe --project ${GCP_PROJECT_ID} "${CLOUD_BUCKET}" >/dev/null 2>&1; then
  gcloud storage buckets create --project ${GCP_PROJECT_ID} ${CLOUD_BUCKET}
fi
gcloud storage mv "${CONFIG_FILE}" "${CLOUD_BUCKET}/config.sh"
================================================
FILE: src/.gitignore
================================================
.gopath
================================================
FILE: src/BUILD.bazel
================================================
# Make go.mod visible to rules in other packages.
exports_files(
    srcs = ["go.mod"],
)
================================================
FILE: src/README.md
================================================
# Source Code
This directory contains the source code for Cloud Robotics Core components. Most
components are written in Go.
## Go
The [Gazelle](https://github.com/bazelbuild/bazel-gazelle) tool manages bazel
BUILD files for Go.
### Dependencies
To automatically update dependencies in bazel BUILD files run:
```
bazel run //:gazelle
```
To re-generate Go modules dependencies run this from the top-level source
directory:
```
./src/gomod.sh
```
This will always download the latest stable tag of a Go module. To use a
specific version, run e.g.:
```
cd src
# use an older version than the latest stable
go get -d github.com/mitchellh/go-server-timing@v1.0.1
# use a specific, yet untagged version
go get -d github.com/mitchellh/go-server-timing@feb680ab92c20d57c527399b842e1941bde888c3
# to also upgrade dependencies, use:
go get -d -u github.com/mitchellh/go-server-timing@v1.0.1
```
More tips on this [one-pager](https://encore.dev/guide/go.mod)
### Licenses
Install go-licenses:
```
go install github.com/google/go-licenses@latest
```
run it:
```
cd src
# get a CSV of all licenses
~/go/bin/go-licenses csv .
# check for bad (forbidden) licenses, should be empty
~/go/bin/go-licenses check ./...
```
### Docs
In order to force a new snapshot, run
```bash
VERSION=$(curl -s https://proxy.golang.org/github.com/googlecloudrobotics/core/@latest | jq -r ".Version")
echo "https://pkg.go.dev/github.com/googlecloudrobotics/core/src/go@${VERSION}"
```
and open the printed link. Then that version is part of the history.
## third party
We track some external deps through [nvchecker](https://github.com/lilydjwg/nvchecker).
Get the tool by running:
```shell
pip3 install nvchecker
```
Below are sample commands for the common workflows. Run all those from the root
of the repo.
Add a new dependency by adding a block to nvchecker.toml:
```toml
[ingress-nginx]
source = "container"
registry = "k8s.gcr.io"
container = "ingress-nginx/controller"
prefix = "v"
```
Get initial version (use same command to update the version):
```shell
$ nvtake -c nvchecker.toml ingress-nginx=0.44.0
```
After updating, please also manually keep METADATA file in sync.
Check for updates:
```shell
$ nvchecker -c nvchecker.toml
[I 09-06 12:26:20.253 core:354] ingress-nginx: updated from 0.44.0 to 1.0.0
```
================================================
FILE: src/app_charts/BUILD.bazel
================================================
load("//bazel/build_rules/app_chart:cache_gcr_credentials.bzl", "cache_gcr_credentials")
load("//bazel/build_rules/app_chart:run_parallel.bzl", "run_parallel")

# base is not in this list because it's not an app, but installed
# manually.
APPS = [
    "k8s-relay",
    "mission-crd",
    "prometheus",
    "token-vendor",
    "akri",
]

# Pushes all charts (base, platform-apps and every app in APPS) in parallel.
run_parallel(
    name = "push-cached-credentials",
    targets = [
        "//src/app_charts/base:base-cloud.push",
        "//src/app_charts/base:base-robot.push",
        "//src/app_charts/platform-apps:platform-apps-cloud.push",
    ] + [
        "//src/app_charts/{app}:{app}.push".format(app = a)
        for a in APPS
    ],
)

# Entry point: wraps the parallel push with cached GCR credentials
# (see cache_gcr_credentials.bzl).
cache_gcr_credentials(
    name = "push",
    target = "push-cached-credentials",
)

# Aggregates the generated app manifests of all apps in APPS.
filegroup(
    name = "app_resources",
    srcs = ["//src/app_charts/{app}:{app}.yaml".format(app = a) for a in APPS],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/README.md
================================================
# Testing instructions
Use `bazel run :push` on the app directory to build & upload Docker images,
upload Helm charts and update the app manifest:
```bash
bazel run //src/app_charts/k8s-relay:k8s-relay.push
```
# Apps
## Mission CRD
The Mission CRD App creates the mission custom resource definition in the cloud and on the robot. The mission custom resources are used to send commands to the robot and are actuated by a robot-type-specific controller.
================================================
FILE: src/app_charts/akri/BUILD.bazel
================================================
load("//bazel:app.bzl", "app")
load("//bazel:app_chart.bzl", "app_chart")
load("//bazel:build_rules/helm_template.bzl", "helm_template")

# Expands the upstream akri Helm chart for the robot cluster. The placeholder
# namespace is substituted with the actual one at install time (see
# robot/akri.yaml).
helm_template(
    name = "akri-chart.robot",
    chart = "//third_party/akri:akri-0.12.9.tgz",
    helm_version = 3,
    # The namespace will later be replaced with the actual one.
    namespace = "HELM-NAMESPACE",
    release_name = "akri",
    values = "akri-robot.values.yaml",
)

# Robot-side chart bundling the expanded akri manifests plus its CRDs.
app_chart(
    name = "akri-robot",
    extra_templates = [
        "//third_party/akri:akri-configuration-crd.yaml",
        "//third_party/akri:akri-instance-crd.yaml",
    ],
    files = [
        ":akri-chart.robot",
    ],
    values = "values-robot.yaml",
)

# The "akri" app consists of the robot chart only.
app(
    name = "akri",
    charts = [
        ":akri-robot",
    ],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/akri/akri-robot.values.yaml
================================================
# kubernetesDistro describes the Kubernetes distro Akri is running on. It is used to conditionally set
# distribution specific values such as container runtime socket. Options: microk8s | k3s | k8s
kubernetesDistro: k8s
# enable udev support for usb devices
udev:
discovery:
enabled: true
configuration:
enabled: true
name: akri-udev
discoveryDetails:
udevRules: ${UDEV_RULES}
================================================
FILE: src/app_charts/akri/robot/akri.yaml
================================================
# This includes all resources expanded from the akri chart using
# the values in ../akri-robot.values.yaml.
# Some pseudo-variables that were inserted there are replaced with actual runtime values.
{{ .Files.Get "files/akri-chart.robot.yaml" | replace "HELM-NAMESPACE" .Release.Namespace | replace "${UDEV_RULES}" (toJson .Values.udev.rules) }}
================================================
FILE: src/app_charts/akri/values-robot.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
registry: "gcr.io/my-gcp-project"
robots: []
udev:
rules: []
================================================
FILE: src/app_charts/base/BUILD.bazel
================================================
load("@rules_shell//shell:sh_test.bzl", "sh_test")
load("//bazel:app_chart.bzl", "app_chart")
load("//bazel:build_rules/helm_template.bzl", "helm_template")

# Tests

# Test-only chart that combines the robot-side app-management and
# cert-manager templates with the cloud CRD templates.
# NOTE(review): not listed in app_management_test's data below — confirm
# where this target is consumed.
app_chart(
    name = "base-test",
    extra_templates = [
        ":cloud/namespace.yaml",
        ":cloud/apps-crd.yaml",
        ":robot/app-management.yaml",
        ":robot/cert-manager.yaml",
        ":robot/cert-manager-certificates.yaml",
        ":robot/cert-manager-issuers.yaml",
    ],
    files = [
        ":cert-manager-chart.robot",
    ],
    images = {
        "chart-assignment-controller": "//src/go/cmd/chart-assignment-controller:chart-assignment-controller-image",
    },
    values = ":values-robot.yaml",
    visibility = ["//visibility:public"],
)

# Renders the packaged base-cloud and base-robot charts with helm and checks
# that the app-management workloads are gated on the app_management value.
sh_test(
    name = "app_management_test",
    srcs = ["app_management_test.sh"],
    data = [
        ":base-cloud",
        ":base-robot",
        "@kubernetes_helm//:helm",
    ],
)

# Robot

# Pre-expands the vendored cert-manager chart for the robot cluster with the
# values in cert-manager-robot.values.yaml.
helm_template(
    name = "cert-manager-chart.robot",
    chart = "//third_party/cert-manager:cert-manager-v1.16.3.tgz",
    helm_version = 3,
    # The namespace will later be replaced with the actual one.
    namespace = "HELM-NAMESPACE",
    release_name = "cert-manager",
    values = "cert-manager-robot.values.yaml",
)

# The base chart installed on robot clusters: bundles the pre-expanded
# cert-manager chart, the fluentd addon, cloud CRDs, and the robot-side
# controller images.
app_chart(
    name = "base-robot",
    extra_templates = [
        ":cloud/namespace.yaml",
        ":cloud/registry-crd.yaml",
        ":cloud/apps-crd.yaml",
        "//third_party/kube-prometheus-stack:01-crds.yaml",
    ],
    files = [
        ":cert-manager-chart.robot",
        "//third_party/fluentd_gcp_addon",
    ],
    images = {
        "cr-syncer": "//src/go/cmd/cr-syncer:cr-syncer-image",
        "gcr-credential-refresher": "//src/go/cmd/gcr-credential-refresher:gcr-credential-refresher-image",
        "metadata-server": "//src/go/cmd/metadata-server:metadata-server-image",
        "chart-assignment-controller": "//src/go/cmd/chart-assignment-controller:chart-assignment-controller-image",
    },
    values = "values-robot.yaml",
    visibility = ["//visibility:public"],
)

# Cloud

# Pre-expands the vendored cert-manager chart for the cloud cluster with the
# values in cert-manager-cloud.values.yaml.
helm_template(
    name = "cert-manager-chart.cloud",
    chart = "//third_party/cert-manager:cert-manager-v1.16.3.tgz",
    helm_version = 3,
    # The namespace will later be replaced with the actual one.
    namespace = "HELM-NAMESPACE",
    release_name = "cert-manager",
    values = "cert-manager-cloud.values.yaml",
)

# Pre-expands the google-cas-issuer chart (cert-manager issuer for Google's
# Certificate Authority Service) for the cloud cluster.
helm_template(
    name = "cert-manager-google-cas-issuer-chart.cloud",
    chart = "//third_party/cert-manager-google-cas-issuer:cert-manager-google-cas-issuer-v0.6.2.tgz",
    # The namespace will later be replaced with the actual one.
    namespace = "HELM-NAMESPACE",
    release_name = "cert-manager-google-cas-issuer",
    values = "cert-manager-google-cas-issuer-cloud.values.yaml",
)

# The base chart installed on the cloud cluster: bundles both pre-expanded
# cert-manager charts, dashboards, CRDs, and the cloud-side controller images.
app_chart(
    name = "base-cloud",
    extra_templates = [
        "@com_github_kubernetes_sigs_application//:app_crd",
        "//third_party/kube-prometheus-stack:01-crds.yaml",
    ],
    files = [
        "relay-dashboard.json",
        ":cert-manager-chart.cloud",
        ":cert-manager-google-cas-issuer-chart.cloud",
        "@ingress-nginx//:ingress-nginx-dashboards",
    ],
    images = {
        "app-rollout-controller": "//src/go/cmd/app-rollout-controller:app-rollout-controller-image",
        "chart-assignment-controller": "//src/go/cmd/chart-assignment-controller:chart-assignment-controller-image",
        "cr-syncer-auth-webhook": "//src/go/cmd/cr-syncer-auth-webhook:cr-syncer-auth-webhook-image",
    },
    values = "values-cloud.yaml",
    visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/base/README.md
================================================
# Making changes to the `fluent-bit` configmap
If you want to change the `fluent-bit` spec, do not edit the autogenerated file
[`./robot/fluent-bit.yaml`](./robot/fluent-bit.yaml)!
Instead, edit [`./fluent-bit-values.yaml`](./fluent-bit-values.yaml) and run
[`./fluent-bit-helm.sh`](./fluent-bit-helm.sh) afterwards.
================================================
FILE: src/app_charts/base/app_management_test.sh
================================================
#!/usr/bin/env bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Smoke test: renders the packaged base-cloud and base-robot charts with helm
# and checks that the app-management workloads are only emitted when the
# `app_management` chart value is set to "true".

HELM="${TEST_SRCDIR}/+non_module_deps+kubernetes_helm/helm"
if [[ ! -x "${HELM}" ]] ; then
  # If we hit this again, consider using the runfiles library:
  # https://github.com/bazelbuild/bazel/blob/master/tools/bash/runfiles/runfiles.bash#L55-L86
  echo >&2 "Failed to locate helm in ${TEST_SRCDIR}."
  exit 1
fi

CLOUD_BASE="${TEST_SRCDIR}/_main/src/app_charts/base/base-cloud-0.0.1.tgz"
ROBOT_BASE="${TEST_SRCDIR}/_main/src/app_charts/base/base-robot-0.0.1.tgz"

# Prints a failure message and aborts the test.
function test_failed() {
  echo "TEST FAILED: $1"
  exit 1
}

# Logs a passing check.
function test_passed() {
  echo "TEST PASSED: $1"
}

# Asserts that running `command` (a helm template invocation) succeeds and
# that its output contains an "app: <application>" label, i.e. the given
# application is part of the rendered template.
function expect_app_installed() {
  local command="$1"
  local application="$2"
  local template
  # No quotes around ${command}: it is a full command line that must undergo
  # word splitting.
  if ! template=$(${command}); then
    echo "TEMPLATE: ${template}"
    test_failed "\"${command}\" failed"
  fi
  if [[ "${template}" != *"app: ${application}"* ]]; then
    echo "TEMPLATE: ${template}"
    test_failed "expected \"${application}\" to be installed in template created by \"${command}\""
  fi
  test_passed "application \"${application}\" is included in template created by \"${command}\""
}

# Asserts that running `command` succeeds and that its output does NOT
# contain an "app: <application>" label.
function expect_app_not_installed() {
  local command="$1"
  local application="$2"
  local template
  if ! template=$(${command}); then
    echo "TEMPLATE: ${template}"
    test_failed "\"${command}\" failed"
  fi
  if [[ "${template}" == *"app: ${application}"* ]]; then
    echo "TEMPLATE: ${template}"
    test_failed "did not expect \"${application}\" to be installed in template created by \"${command}\""
  fi
  test_passed "application \"${application}\" is not included in template created by \"${command}\""
}

expect_app_installed "${HELM} template ${CLOUD_BASE} --set-string app_management=true" "app-rollout-controller"
expect_app_installed "${HELM} template ${CLOUD_BASE} --set-string app_management=true" "chart-assignment-controller"
expect_app_not_installed "${HELM} template ${CLOUD_BASE} --set-string app_management=false" "app-rollout-controller"
expect_app_not_installed "${HELM} template ${CLOUD_BASE} --set-string app_management=false" "chart-assignment-controller"
expect_app_installed "${HELM} template ${ROBOT_BASE} --set-string app_management=true" "chart-assignment-controller"
expect_app_not_installed "${HELM} template ${ROBOT_BASE} --set-string app_management=false" "chart-assignment-controller"
================================================
FILE: src/app_charts/base/cert-manager-cloud.values.yaml
================================================
# Configuration for the cert-manager chart.
# Reference: https://github.com/jetstack/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml
# Install the CRDs via the Helm chart so that the webhook also works when it
# runs in a different namespace than cert-manager.
installCRDs: true
# Enable Workload Identity for DNS01 support when we have a custom domain.
serviceAccount:
annotations:
iam.gke.io/gcp-service-account: cert-manager@PROJECT-ID.iam.gserviceaccount.com
================================================
FILE: src/app_charts/base/cert-manager-google-cas-issuer-cloud.values.yaml
================================================
# Configuration for the cert-manager chart.
# Reference: https://github.com/jetstack/google-cas-issuer/blob/main/deploy/charts/google-cas-issuer/values.yaml
# No values are required for now.
# This was put in place to add values in the future without requiring additional configuration in other files.
# If values are added this disclaimer should be removed.
# The Kubernetes service account must be annotated in order to impersonate a GCP service account using workload identity.
serviceAccount:
annotations:
# PROJECT-ID will be replaced by a script in a future step with the contents of the `PROJECT_ID` env var.
iam.gke.io/gcp-service-account: sa-google-cas-issuer@PROJECT-ID.iam.gserviceaccount.com
app:
approval:
subjects:
- kind: ServiceAccount
name: cert-manager
# TODO(alejoasd): this should be set from configuration dynamically
namespace: default
================================================
FILE: src/app_charts/base/cert-manager-robot.values.yaml
================================================
# Configuration for the cert-manager chart.
# Reference: https://github.com/jetstack/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml
global:
# Reduce verbosity of text-logging.
logLevel: 1
# Install the CRDs via the Helm chart so that the webhook also works when it
# runs in a different namespace than cert-manager.
installCRDs: true
# Disable leader-elect for cert-manager and cainjector.
# Since we only have one replica running, we don't
# need leader election to ensure only one instance is active.
# By turning this off, the leader will not update its leases in etcd every N seconds which
# ultimately reduces etcd disk writes.
#
# To ensure that we only have one replica running, we need to use the Recreate
# deployment strategy.
extraArgs:
- --leader-elect=false
strategy:
type: Recreate
cainjector:
extraArgs:
- --leader-elect=false
strategy:
type: Recreate
================================================
FILE: src/app_charts/base/cloud/app-management-policy.yaml
================================================
# This policy lets app-rollout and chart-assignment controllers operate on the
# apps & registry CRDs. It also grants chart-assignment-controller the
# cluster-admin role, which lets it install any Helm chart. This is a very
# broad role, but since many Helm charts install ClusterRoleBindings it's not
# possible to do it without cluster-admin or something equivalent.
# For app-rollout-controller
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:app-rollout-controller:base
labels:
app-rollout-controller.cloudrobotics.com/aggregate-to-app-rollout: "true"
rules:
- apiGroups:
- registry.cloudrobotics.com
resources:
- robots
- robots/status
verbs:
- get
- list
- watch
- apiGroups:
- apps.cloudrobotics.com
resources:
- apps
- approllouts
- approllouts/status
- chartassignments
- chartassignments/status
verbs:
- get
- list
- watch
- apiGroups:
- apps.cloudrobotics.com
resources:
- chartassignments
verbs:
- create
- update
- patch
- delete
- apiGroups:
- apps.cloudrobotics.com
resources:
- approllouts/status
verbs:
- update
- patch
---
# Aggregated role for app-rollout-controller
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:app-rollout-controller
aggregationRule:
clusterRoleSelectors:
- matchLabels:
app-rollout-controller.cloudrobotics.com/aggregate-to-app-rollout: "true"
rules: [] # The control plane automatically fills in the rules
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: app-rollout-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-robotics:app-rollout-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-robotics:app-rollout-controller
subjects:
- namespace: {{ .Release.Namespace }}
kind: ServiceAccount
name: app-rollout-controller
---
# For chart-assignment-controller
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:chart-assignment-controller:base
labels:
chart-assignment-controller.cloudrobotics.com/aggregate-to-chart-assignment: "true"
rules:
- apiGroups:
- apps.cloudrobotics.com
resources:
- chartassignments
- chartassignments/status
verbs:
- get
- list
- watch
- apiGroups:
- apps.cloudrobotics.com
resources:
- chartassignments/status
verbs:
- update
- patch
- apiGroups:
- apps.cloudrobotics.com
resources:
- resourcesets
- resourcesets/status
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:chart-assignment-controller:namespace-admin
labels:
chart-assignment-controller.cloudrobotics.com/aggregate-to-chart-assignment: "true"
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
---
# Aggregated role for chart-assignment-controller
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:chart-assignment-controller
aggregationRule:
clusterRoleSelectors:
- matchLabels:
chart-assignment-controller.cloudrobotics.com/aggregate-to-chart-assignment: "true"
- matchLabels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules: [] # The control plane automatically fills in the rules
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: chart-assignment-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-robotics:chart-assignment-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-robotics:chart-assignment-controller
subjects:
- namespace: {{ .Release.Namespace }}
kind: ServiceAccount
name: chart-assignment-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-robotics:chart-assignment-controller:cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- namespace: {{ .Release.Namespace }}
kind: ServiceAccount
name: chart-assignment-controller
================================================
FILE: src/app_charts/base/cloud/app-management.yaml
================================================
{{ if eq .Values.app_management "true" }}
# app-rollout-controller
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-rollout-controller
spec:
replicas: 1
selector:
matchLabels:
app: app-rollout-controller
template:
metadata:
labels:
app: app-rollout-controller
spec:
serviceAccountName: app-rollout-controller
containers:
- name: app-rollout-controller
image: {{ .Values.registry }}{{ .Values.images.app_rollout_controller }}
args:
- "--params=\
domain={{ .Values.domain }},\
project={{ .Values.project }},\
ingress_ip={{ .Values.ingress_ip }},\
registry={{ .Values.registry }},\
deploy_environment={{ .Values.deploy_environment }},\
region={{ .Values.region }},\
use_tv_verbose={{ .Values.use_tv_verbose }}"
- --webhook-port=9876
- --cert-dir=/tls
env:
- name: GOOGLE_CLOUD_PROJECT
value: {{ .Values.project }}
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 10
httpGet:
port: 8080
path: /healthz
ports:
- name: webhook
containerPort: 9876
volumeMounts:
- mountPath: /tls
name: tls
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
volumes:
- name: tls
secret:
secretName: app-rollout-controller-tls
---
apiVersion: v1
kind: Service
metadata:
name: app-rollout-controller
spec:
type: ClusterIP
ports:
- port: 443
targetPort: webhook
selector:
app: app-rollout-controller
---
# The app rollout controller runs admission webhooks, which need to be served via TLS.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: app-rollout-controller
spec:
secretName: app-rollout-controller-tls
commonName: app-rollout-controller.{{ .Release.Namespace }}.svc
dnsNames:
- app-rollout-controller.{{ .Release.Namespace }}.svc
- app-rollout-controller.{{ .Release.Namespace }}.svc.cluster.local
issuerRef:
kind: ClusterIssuer
name: cluster-authority
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: app-rollout-controller
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/app-rollout-controller
webhooks:
- name: apps.apps.cloudrobotics.com
admissionReviewVersions: ["v1"]
failurePolicy: Fail
clientConfig:
service:
namespace: {{ .Release.Namespace }}
name: app-rollout-controller
path: /app/validate
rules:
- apiGroups:
- apps.cloudrobotics.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- apps
sideEffects: None
- name: approllouts.apps.cloudrobotics.com
admissionReviewVersions: ["v1"]
failurePolicy: Fail
clientConfig:
service:
namespace: {{ .Release.Namespace }}
name: app-rollout-controller
path: /approllout/validate
rules:
- apiGroups:
- apps.cloudrobotics.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- approllouts
sideEffects: None
---
# chart-assignment-controller
apiVersion: apps/v1
kind: Deployment
metadata:
name: chart-assignment-controller
spec:
replicas: 1
selector:
matchLabels:
app: chart-assignment-controller
template:
metadata:
labels:
app: chart-assignment-controller
spec:
serviceAccountName: chart-assignment-controller
containers:
- name: chart-assignment-controller
image: {{ .Values.registry }}{{ .Values.images.chart_assignment_controller }}
args:
- "--cloud-cluster=true"
- "--webhook-enabled=true"
- "--webhook-port=9876"
- "--cert-dir=/tls"
env:
- name: GOOGLE_CLOUD_PROJECT
value: {{ .Values.project }}
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 10
httpGet:
port: 8080
path: /healthz
ports:
- name: webhook
containerPort: 9876
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
volumeMounts:
- name: tls
mountPath: /tls
- name: tmp
mountPath: /tmp
volumes:
- name: tls
secret:
secretName: chart-assignment-controller-tls
- name: tmp
emptyDir:
medium: Memory
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
---
# The chart assignment controller runs admission webhooks, which need to be served via TLS.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: chart-assignment-controller
spec:
secretName: chart-assignment-controller-tls
commonName: chart-assignment-controller.{{ .Release.Namespace }}.svc
dnsNames:
- chart-assignment-controller.{{ .Release.Namespace }}.svc
- chart-assignment-controller.{{ .Release.Namespace }}.svc.cluster.local
issuerRef:
kind: ClusterIssuer
name: cluster-authority
---
apiVersion: v1
kind: Service
metadata:
name: chart-assignment-controller
spec:
type: ClusterIP
ports:
- port: 443
targetPort: webhook
selector:
app: chart-assignment-controller
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: chart-assignment-controller
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/chart-assignment-controller
webhooks:
- name: chartassignments.apps.cloudrobotics.com
admissionReviewVersions: ["v1"]
failurePolicy: Fail
clientConfig:
service:
namespace: {{ .Release.Namespace }}
name: chart-assignment-controller
path: /chartassignment/validate
rules:
- apiGroups:
- apps.cloudrobotics.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- chartassignments
sideEffects: None
{{ end }}
================================================
FILE: src/app_charts/base/cloud/apps-crd.yaml
================================================
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: apps.apps.cloudrobotics.com
annotations:
helm.sh/resource-policy: keep
spec:
group: apps.cloudrobotics.com
names:
kind: App
plural: apps
singular: app
scope: Cluster
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
repository:
type: string
version:
type: string
components:
type: object
properties:
cloud:
type: object
properties:
name:
type: string
inline:
type: string
robot:
type: object
properties:
name:
type: string
inline:
type: string
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: approllouts.apps.cloudrobotics.com
annotations:
helm.sh/resource-policy: keep
spec:
group: apps.cloudrobotics.com
names:
kind: AppRollout
plural: approllouts
singular: approllout
scope: Cluster
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: {}
additionalPrinterColumns:
- jsonPath: .status.assignments
name: Assignments
type: integer
- jsonPath: .status.readyAssignments
name: Ready
type: integer
- jsonPath: .status.failedAssignments
name: Failed
type: integer
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
appName:
type: string
cloud:
type: object
properties:
values:
type: object
x-kubernetes-preserve-unknown-fields: true
robots:
type: array
items:
type: object
properties:
values:
type: object
x-kubernetes-preserve-unknown-fields: true
version:
type: string
selector:
type: object
properties:
any:
type: boolean
matchLabels:
type: object
x-kubernetes-preserve-unknown-fields: true
matchExpressions:
type: array
items:
type: object
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
status:
type: object
properties:
observedGeneration:
type: integer
assignments:
type: integer
readyAssignments:
type: integer
settledAssignments:
type: integer
failedAssignments:
type: integer
conditions:
type: array
items:
type: object
properties:
lastUpdateTime:
type: string
format: date-time
lastTransitionTime:
type: string
format: date-time
status:
type: string
type:
type: string
message:
type: string
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: chartassignments.apps.cloudrobotics.com
annotations:
cr-syncer.cloudrobotics.com/spec-source: cloud
cr-syncer.cloudrobotics.com/filter-by-robot-name: "True"
helm.sh/resource-policy: keep
spec:
group: apps.cloudrobotics.com
names:
kind: ChartAssignment
plural: chartassignments
singular: chartassignment
scope: Cluster
versions:
- name: v1alpha1
served: true
storage: true
additionalPrinterColumns:
- jsonPath: .status.phase
name: Phase
type: string
- jsonPath: .status.observedGeneration
name: Generation
type: integer
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
subresources:
status: {}
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
clusterName:
type: string
namespaceName:
type: string
chart:
type: object
properties:
repository:
type: string
name:
type: string
version:
type: string
inline:
type: string
values:
type: object
x-kubernetes-preserve-unknown-fields: true
status:
type: object
properties:
observedGeneration:
type: integer
conditions:
type: array
items:
type: object
properties:
lastUpdateTime:
type: string
format: date-time
lastTransitionTime:
type: string
format: date-time
status:
type: string
type:
type: string
message:
type: string
phase:
type: string
================================================
FILE: src/app_charts/base/cloud/cert-ingress.yaml
================================================
# Owns the 'tls' block to ensure a cert is generated and avoids repetition of
# the 'tls' block in other ingresses. Cert is associated via 'host' field.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: certificate-ingress
spec:
ingressClassName: nginx
tls:
- hosts:
- {{ .Values.domain }}
rules:
- {}
================================================
FILE: src/app_charts/base/cloud/cert-manager-certificates.yaml
================================================
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: selfsigned-ca
spec:
isCA: true
duration: 8760h # 1year. This needs to be at least 3x 90days
commonName: {{ .Values.domain }}
secretName: cluster-authority
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: selfsigned-issuer
kind: ClusterIssuer
group: cert-manager.io
{{- if .Values.certificate_provider }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: cloud-robotics
spec:
commonName: {{ .Values.domain }}
secretName: tls
dnsNames:
- {{ .Values.domain }}
{{- if eq .Values.certificate_provider "lets-encrypt" }}
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
{{- else if eq .Values.certificate_provider "google-cas" }}
issuerRef:
name: google-cas
group: cas-issuer.jetstack.io
kind: GoogleCASClusterIssuer
{{- end }}
{{- end }}
================================================
FILE: src/app_charts/base/cloud/cert-manager-google-cas-issuer.yaml
================================================
{{- if eq .Values.certificate_provider "google-cas" }}
# This includes all resources expanded from the cert-manager-google-cas-issuer
# chart using the values in ../cert-manager-google-cas-issuer-cloud.values.yaml.
# Some pseudo-variables that were inserted there are replaced with actual runtime values.
{{ .Files.Get "files/cert-manager-google-cas-issuer-chart.cloud.yaml" | replace "HELM-NAMESPACE" .Release.Namespace | replace "PROJECT-ID" .Values.project }}
{{- end }}
================================================
FILE: src/app_charts/base/cloud/cert-manager-issuers.yaml
================================================
# A self-signing issuer for cluster-internal services.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cluster-authority
spec:
ca:
secretName: cluster-authority
{{- if eq .Values.certificate_provider "lets-encrypt" }}
---
# While an Issuer may satisfy our current needs within the default namespace,
# anticipating future growth and potential deployments in additional namespaces,
# adopting a ClusterIssuer offers a more scalable and versatile solution.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: "{{ .Values.owner_email }}"
privateKeySecretRef:
name: letsencrypt-prod
# We can't use dns01 since we don't control the dns-zone that endpoints uses.
solvers:
- http01:
ingress:
class: nginx
{{- else if eq .Values.certificate_provider "google-cas" }}
---
# Issuer for Google's Certificate Authority service (CAS) using the google-cas-issuer project.
# https://github.com/jetstack/google-cas-issuer
apiVersion: cas-issuer.jetstack.io/v1beta1
kind: GoogleCASClusterIssuer
metadata:
name: google-cas
spec:
project: {{ .Values.project }}
location: {{ .Values.region }}
caPoolId: "{{ .Values.project }}-ca-pool"
{{- end }}
================================================
FILE: src/app_charts/base/cloud/cert-manager.yaml
================================================
# This includes all resources expanded from the cert-manager chart using
# the values in ../cert-manager-cloud.values.yaml.
# Some pseudo-variables that were inserted there are replaced with actual runtime values.
{{ .Files.Get "files/cert-manager-chart.cloud.yaml" | replace "HELM-NAMESPACE" .Release.Namespace | replace "PROJECT-ID" .Values.project }}
================================================
FILE: src/app_charts/base/cloud/cr-syncer-auth-webhook.yaml
================================================
{{ if eq .Values.onprem_federation "true" }}
# The cr-syncer-auth-webhook verifies that requests from the cr-syncer are
# limited to the robot named in the credentials.
apiVersion: apps/v1
kind: Deployment
metadata:
name: cr-syncer-auth-webhook
spec:
selector:
matchLabels:
app: cr-syncer-auth-webhook
template:
metadata:
labels:
app: cr-syncer-auth-webhook
spec:
containers:
- name: cr-syncer-auth-webhook
image: {{ .Values.registry }}{{ .Values.images.cr_syncer_auth_webhook }}
args:
- --port=8080
- --accept-legacy-service-account-credentials
- --token-vendor=http://token-vendor.app-token-vendor.svc.cluster.local
ports:
- name: webhook
containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
livenessProbe:
httpGet:
path: /healthz
port: 8080
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
serviceAccountName: cr-syncer-auth-webhook
---
# The incoming request from the cr-syncer will be extended with a header to
# impersonate this SA if it passes the webhook's policy checks.
apiVersion: v1
kind: ServiceAccount
metadata:
name: cr-syncer-auth-webhook
---
apiVersion: v1
kind: Service
metadata:
name: cr-syncer-auth-webhook
labels:
app: cr-syncer-auth-webhook
spec:
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: cr-syncer-auth-webhook
type: ClusterIP
{{ end }}
================================================
FILE: src/app_charts/base/cloud/cr-syncer-policy.yaml
================================================
{{ if eq .Values.onprem_federation "true" }}
# This policy lets the cr-syncer operate on the apps & registry CRDs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:cr-syncer:base
labels:
cr-syncer.cloudrobotics.com/aggregate-to-robot-service: "true"
rules:
# To sync the specs from the cloud to the robot, the cr-syncer needs to read
# the resources in the cloud cluster. Note that the Robot and ChartAssignment
# CRDs enable the /status subresource, whereas the RobotType does not.
- apiGroups:
- registry.cloudrobotics.com
resources:
- robots
- robots/status
- robottypes
verbs:
- get
- list
- watch
- apiGroups:
- apps.cloudrobotics.com
resources:
- chartassignments
- chartassignments/status
verbs:
- get
- list
- watch
# Only the /status subresource can be updated. It's important that the robot
# can update the status but not the spec, or it could run code on other robots.
- apiGroups:
- registry.cloudrobotics.com
resources:
- robots/status
verbs:
- update
- apiGroups:
- apps.cloudrobotics.com
resources:
- chartassignments/status
verbs:
- update
---
# This aggregate role will combine all roles with the given label. This means
# that policy can easily be added for CRDs beyond those listed above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:cr-syncer
aggregationRule:
clusterRoleSelectors:
- matchLabels:
cr-syncer.cloudrobotics.com/aggregate-to-robot-service: "true"
rules: [] # The control plane automatically fills in the rules
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-robotics:cr-syncer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-robotics:cr-syncer
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: robot-service@{{ .Values.project }}.iam.gserviceaccount.com
# The grant for the cr-syncer-auth-webhook replaces the grant for the
# robot-service@ account.
- namespace: {{ .Release.Namespace }}
kind: ServiceAccount
name: cr-syncer-auth-webhook
{{ end }}
================================================
FILE: src/app_charts/base/cloud/domain-redirect.yaml
================================================
{{ $endpointsURL := print "www.endpoints." .Values.project ".cloud.goog" }}
{{ if ne $endpointsURL .Values.domain }}
# When a custom domain is configured, permanently redirect requests for the
# default endpoints domain to it.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: domain-redirect
  annotations:
    nginx.ingress.kubernetes.io/permanent-redirect: http://{{ .Values.domain }}
spec:
  ingressClassName: nginx
  rules:
  # Reuse $endpointsURL so the host rule can never drift from the
  # condition above (previously the same literal was spelled out twice).
  - host: "{{ $endpointsURL }}"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            # This isn't used, but needs to be specified.
            name: dummy
            port:
              number: 80
{{ end }}
================================================
FILE: src/app_charts/base/cloud/fluentd-metrics.yaml
================================================
# Adds a Prometheus ServiceMonitor for scraping the fluentd metrics.
# By default, google-fluentd exports some Prometheus metrics on port 24231.
apiVersion: v1
kind: Service
metadata:
name: fluentd-metrics
labels:
app: fluentd-metrics
namespace: kube-system
spec:
ports:
- port: 24231
name: metrics
selector:
k8s-app: fluentd-gcp
type: ClusterIP
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: fluentd-metrics
labels:
prometheus: kube-prometheus
namespace: kube-system
spec:
endpoints:
- port: metrics
path: /metrics
interval: 10s
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
selector:
matchLabels:
app: fluentd-metrics
================================================
FILE: src/app_charts/base/cloud/kubernetes-api.yaml
================================================
{{ if eq .Values.onprem_federation "true" }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kubernetes-api
annotations:
nginx.ingress.kubernetes.io/auth-url: http://cr-syncer-auth-webhook.default.svc.cluster.local/auth
nginx.ingress.kubernetes.io/auth-response-headers: Authorization
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/proxy-read-timeout: "600" # seconds
nginx.ingress.kubernetes.io/client-body-buffer-size: "50m"
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: /apis/core.kubernetes($|/)(.*)
pathType: Prefix
backend:
service:
name: kubernetes
port:
number: 443
{{ end }}
================================================
FILE: src/app_charts/base/cloud/namespace.yaml
================================================
# Ensure the Helm release namespace exists.
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Release.Namespace }}
  labels:
    # Exempts this namespace from cert-manager's validating webhook so that
    # cert-manager itself can be installed into it.
    # NOTE(review): this is the legacy (pre-v0.11) label name — confirm it is
    # still honored by the cert-manager version deployed from this chart.
    certmanager.k8s.io/disable-validation: "true"
================================================
FILE: src/app_charts/base/cloud/nginx-ingress-controller-policy.yaml
================================================
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-controller-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: {{ .Release.Namespace }}
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: {{ .Release.Namespace }}
================================================
FILE: src/app_charts/base/cloud/nginx-ingress-controller.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-ingress-controller
data:
# This is the same as the default but with the addition of $http_x_forwarded_for,
# which is useful when the GKE Global Application LB is also pointed at nginx.
# https://cloud.google.com/load-balancing/docs/https#x-forwarded-for_header
log-format-upstream: $remote_addr - $remote_user - $http_x_forwarded_for [$time_local]
"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length
$request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr
$upstream_response_length $upstream_response_time $upstream_status $req_id
# The token-vendor checks the Original-URI header to accept tokens from query
# parameters.
proxy-add-original-uri-header: "true"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-controller
spec:
selector:
matchLabels:
k8s-app: nginx-ingress-controller
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
serviceAccountName: ingress-nginx
dnsPolicy: ClusterFirst
containers:
- name: nginx-ingress-controller
image: registry.k8s.io/ingress-nginx/controller-chroot:v1.8.4@sha256:76100ab4c1b3cdc2697dd26492ba42c6519e99c5df1bc839ac5d6444a2c58d17
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
resources:
requests:
memory: "1Gi"
cpu: 1
args:
- /nginx-ingress-controller
- --v=1
- --default-backend-service=kube-system/default-http-backend
- --publish-service=$(POD_NAMESPACE)/nginx-ingress-lb
- --election-id=ingress-controller-leader
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/nginx-ingress-controller
- --default-ssl-certificate=default/tls
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
- SYS_CHROOT
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
- name: healthz
containerPort: 10254
nodeSelector:
kubernetes.io/os: linux
terminationGracePeriodSeconds: 300
---
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress-lb
labels:
app: nginx-ingress-lb
spec:
type: LoadBalancer
externalTrafficPolicy: Local
loadBalancerIP: {{ .Values.ingress_ip }}
ports:
- port: 80
name: http
targetPort: 80
appProtocol: HTTP
- port: 443
name: https
targetPort: 443
appProtocol: HTTPS
selector:
k8s-app: nginx-ingress-controller
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
name: nginx
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: nginx-ingress-controller
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: nginx-ingress-controller
minReplicas: 1
maxReplicas: 5
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress-controller-metrics
labels:
app: nginx-ingress-controller-metrics
spec:
ports:
- port: 10254
name: healthz
selector:
k8s-app: nginx-ingress-controller
type: ClusterIP
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: nginx-ingress-controller-metrics
labels:
prometheus: kube-prometheus
spec:
endpoints:
- port: healthz
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
selector:
matchLabels:
app: nginx-ingress-controller-metrics
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-dashboards-json
labels:
grafana: "1"
data:
nginx.json: |-
{{ .Files.Get "files/nginx.json" | indent 4 }}
request-handling-performance.json: |-
{{ .Files.Get "files/request-handling-performance.json" | indent 4 }}
================================================
FILE: src/app_charts/base/cloud/oauth2-proxy.yaml
================================================
{{ if and (ne .Values.oauth2_proxy.client_id "") (ne .Values.oauth2_proxy.client_secret "") }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: oauth2-proxy
spec:
replicas: 1
selector:
matchLabels:
app: oauth2-proxy
template:
metadata:
labels:
app: oauth2-proxy
spec:
containers:
- name: oauth2-proxy
args:
- --provider=oidc
- --oidc-issuer-url=https://accounts.google.com
- --email-domain=*
- --upstream=http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/
- --upstream=https://{{ .Values.domain }}/
- --http-address=0.0.0.0:8080
- --pass-access-token
- --pass-host-header
- "--scope=profile email https://www.googleapis.com/auth/iam"
- --cookie-expire=168h
- --cookie-refresh=1h
env:
- name: OAUTH2_PROXY_CLIENT_ID
value: {{ .Values.oauth2_proxy.client_id }}
- name: OAUTH2_PROXY_CLIENT_SECRET
value: {{ .Values.oauth2_proxy.client_secret }}
- name: OAUTH2_PROXY_COOKIE_SECRET
value: {{ .Values.oauth2_proxy.cookie_secret }}
image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1
ports:
- name: http
containerPort: 8080
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
---
apiVersion: v1
kind: Service
metadata:
name: oauth2-proxy
spec:
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: oauth2-proxy
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: oauth2-proxy
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /apis/$2
nginx.ingress.kubernetes.io/proxy-read-timeout: "600" # seconds
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: /web-apis($|/)(.*)
pathType: Prefix
backend:
service:
name: oauth2-proxy
port:
name: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: oauth2-proxy-interactive
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: "/oauth2"
pathType: Prefix
backend:
service:
name: oauth2-proxy
port:
name: http
{{ end }}
================================================
FILE: src/app_charts/base/cloud/registry-crd.yaml
================================================
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: robottypes.registry.cloudrobotics.com
annotations:
cr-syncer.cloudrobotics.com/spec-source: cloud
helm.sh/resource-policy: keep
spec:
group: registry.cloudrobotics.com
names:
kind: RobotType
plural: robottypes
singular: robottype
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required: ['make', 'model']
maxProperties: 2
properties:
make:
type: string
model:
type: string
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: robots.registry.cloudrobotics.com
annotations:
cr-syncer.cloudrobotics.com/filter-by-robot-name: "True"
cr-syncer.cloudrobotics.com/status-subtree: "robot"
cr-syncer.cloudrobotics.com/spec-source: cloud
helm.sh/resource-policy: keep
spec:
group: registry.cloudrobotics.com
names:
kind: Robot
plural: robots
singular: robot
scope: Namespaced
versions:
- name: v1alpha1
served: true
storage: true
subresources:
status: {}
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
maxProperties: 3
properties:
type:
type: string
project:
type: string
status:
type: object
properties:
cloud:
type: object
x-kubernetes-preserve-unknown-fields: true
robot:
type: object
properties:
info:
type: object
additionalProperties:
type: string
updateTime:
type: string
state:
type: string
enum:
- UNDEFINED
- UNAVAILABLE
- AVAILABLE
- EMERGENCY_STOP
- ERROR
lastStateChangeTime:
type: string
batteryPercentage:
type: number
emergencyStopButtonPressed:
type: boolean
configuration:
type: object
properties:
trolleyAttached:
type: boolean
================================================
FILE: src/app_charts/base/cloud/registry-policy.yaml
================================================
# This policy lets the human-acl GCP SA register robots. For context, see the
# IAM policy in service-account.tf.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cloud-robotics:robot-creator
rules:
- apiGroups:
- registry.cloudrobotics.com
resources:
- robots
- robots/status
verbs:
- create
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-robotics:human-acl
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-robotics:robot-creator
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: human-acl@{{ .Values.project }}.iam.gserviceaccount.com
================================================
FILE: src/app_charts/base/cloud/relay-dashboards.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
name: relay-dashboards-json
labels:
grafana: "1"
data:
relay-dashboard.json: |-
{{ .Files.Get "files/relay-dashboard.json" | indent 4 }}
================================================
FILE: src/app_charts/base/cloud/token-vendor-app-fwd.yaml
================================================
# The Token Vendor was moved to the app namespace.
# We create this service here in default namespace
# as some codepaths still use the hard-coded
# "token-vendor.default.svc.cluster.local" address.
#
# Fix: dropped the empty `annotations:` key under metadata — it carried no
# entries and serializes as `annotations: null`, which causes spurious diffs
# on `kubectl apply`.
apiVersion: v1
kind: Service
metadata:
  name: token-vendor
spec:
  # Ports on an ExternalName service are informational; resolution happens
  # purely via the CNAME below.
  ports:
  - port: 80
    targetPort: 9090
    protocol: TCP
    name: token-vendor
  type: ExternalName
  externalName: token-vendor.app-token-vendor.svc.cluster.local
================================================
FILE: src/app_charts/base/cloud/token-vendor-rollout.yaml
================================================
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: AppRollout
metadata:
name: token-vendor
labels:
app: token-vendor
spec:
appName: token-vendor-dev
cloud: {}
================================================
FILE: src/app_charts/base/fluent-bit-helm.sh
================================================
#!/bin/bash
# Regenerates ./robot/fluent-bit.yaml by rendering the upstream fluent-bit
# Helm chart and post-processing the output into a Cloud Robotics Helm
# template. The sed invocations below are order-sensitive: do not reorder.
# needs at least helm v3.5.0
OUTPUT=./robot/fluent-bit.yaml
TEMPLATE_VERSION=0.48.9
helm repo add fluent https://fluent.github.io/helm-charts
helm repo update fluent
helm template fluent-bit fluent/fluent-bit --version ${TEMPLATE_VERSION} -f fluent-bit-values.yaml --skip-tests > ${OUTPUT}
# Wrap the rendered manifest in a feature-gate conditional. The second '1i'
# insertion lands *above* the first, so the final order is: autogenerated-file
# header, '{{ if ... }}', chart output, '{{ end }}'.
sed -i '1i\{{ if and (eq .Values.robot_authentication "true") (eq .Values.fluentbit "true") }}' ${OUTPUT}
sed -i '1i\# !!! DO NOT EDIT THIS FILE !!!\n# This file is autogenerated using src/app_charts/base/fluent-bit-helm.sh.\n# See src/app_charts/base/README.md for update instructions.' ${OUTPUT}
sed -i '$a\{{ end }}' ${OUTPUT}
# Replace the placeholder cluster name with the robot's name at deploy time.
sed -i 's/MY_ROBOT/{{ .Values.robot.name }}/' ${OUTPUT}
# Add a template expressions for prepending a subdomain to the fluentbit Tag_Prefix
# If no subdomain is supplied, the Tag_Prefix will resolve to "kube.var.log.containers."
# Otherwise, if subdomain is supplied (without the "." at the end), the Tag_Prefix will be "<subdomain>.kube.var.log.containers."
sed -i 's/kube\.\*/{{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*/' ${OUTPUT}
sed -i 's/Tag_Prefix kube.var.log.containers./Tag_Prefix {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.var.log.containers./' ${OUTPUT}
# This needs to be an actual Cloud zone so that it can be mapped
# to a Monarch/Stackdriver region. TODO(swolter): We should make
# this zone configurable to avoid confusing users.
sed -i 's/MY_CLUSTER_LOCATION/europe-west1-c/' ${OUTPUT}
================================================
FILE: src/app_charts/base/fluent-bit-values.yaml
================================================
image:
pullPolicy: IfNotPresent
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Values.metricsPort }}
Health_Check On
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
# Merges multi-line Abseil logs. The regexs assume that any line which does not start
# with an Abseil log preamble is part of the previous log message. No assumptions on
# indentation or similar are made.
[MULTILINE_PARSER]
Name absl_logs_multiline
Type regex
Flush_timeout 1000
#
# Regex rules for multiline parsing
# ---------------------------------
#
# configuration hints:
#
# - first state always has the name: start_state
# - every field in the rule must be inside double quotes
#
# rules | state name | regex pattern | next state
# ------|---------------|---------------------------------------------------------------------------------------------
Rule "start_state" "/^((W|I|E|F))([0-9]{4}) ([^ ]+)\s+([-0-9]+) (\S+:\d+)] (.*)$/" "cont"
Rule "cont" "/^(?!(((W|I|E|F))([0-9]{4}) ([^ ]+)\s+([-0-9]+) (\S+:\d+)]))(.*)$/" "cont"
# A parser for Abseil log files: https://abseil.io/docs/cpp/guides/logging#prefix
[PARSER]
Name absl_logs
Format regex
Regex ^(?<severity>(W|I|E|F))([0-9]{4}) (?<time>[^ ]+)\s+(?<pid>[-0-9]+) (?<source>\S+:\d+)] (?<message>[\s\S]*)$
Time_Key time
Time_Format %H:%M:%S.%L
Time_Keep On
Types pid:integer
## https://docs.fluentbit.io/manual/pipeline/inputs
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
# Adding the 'absl_logs_multiline' does not work as intended.
# It must be specified as a dedicated filter (see below).
Multiline.Parser docker, cri, go, python
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag k8s_node.${MY_NODE_NAME}
Systemd_Filter _SYSTEMD_UNIT=containerd.service
Systemd_Filter _SYSTEMD_UNIT=etcd.service
Systemd_Filter _SYSTEMD_UNIT=kube-apiserver.service
Systemd_Filter _SYSTEMD_UNIT=kube-controller-manager.service
Systemd_Filter _SYSTEMD_UNIT=kube-scheduler.service
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Systemd_Filter _SYSTEMD_UNIT=sshd.service
Read_From_Tail On
[INPUT]
Name systemd
Tag kernel.${MY_NODE_NAME}
Systemd_Filter _TRANSPORT=kernel
Read_From_Tail On
## https://docs.fluentbit.io/manual/pipeline/filters
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
# Joins multiple Abseil logs into a single line.
[FILTER]
Name multiline
Match kube.*
Multiline.Key_content log
Multiline.Parser absl_logs_multiline
# Applies the absl_logs parser to the 'log' field.
[FILTER]
Name parser
Match kube.*
Key_Name log
Parser absl_logs
[FILTER]
Name modify
Match kube.*
Condition Key_value_equals level info
Add severity INFO
[FILTER]
Name modify
Match kube.*
Condition Key_value_equals level warning
Add severity WARNING
[FILTER]
Name modify
Match kube.*
Condition Key_value_equals level warn
Add severity WARNING
[FILTER]
Name modify
Match kube.*
Condition Key_value_equals level error
Add severity ERROR
[FILTER]
Name modify
Match kube.*
Condition Key_value_matches log ^.*\[\sinfo\].*$
Add severity INFO
[FILTER]
Name modify
Match kube.*
Condition Key_value_matches log ^.*\[\swarn\].*$
Add severity WARNING
[FILTER]
Name modify
Match kube.*
Condition Key_value_matches log .*\[error\].*
Add severity ERROR
[FILTER]
Name modify
Match kube.*
Condition Key_value_matches log .*(type="Info"|level=info).*
Add severity INFO
[FILTER]
Name modify
Match kube.*
Condition Key_value_matches log .*(type="Warning"|level=warning).*
Add severity WARNING
[FILTER]
Name modify
Match kube.*
Condition Key_value_matches log .*(type="Error"|level=error).*
Add severity ERROR
# We're setting the logName here and below to avoid a high cardinality field on stackdriver
# See https://github.com/fluent/fluent-bit/issues/9897
[FILTER]
Name modify
Match kube.*
Condition Key_value_equals stream stderr
Add severity ERROR
Set logging.googleapis.com/logName stderr
[FILTER]
Name modify
Match kube.*
Condition Key_value_equals stream stdout
Add severity INFO
Set logging.googleapis.com/logName stdout
[FILTER]
Name modify
Match kube.*
Remove _p
[FILTER]
Name modify
Match kube.*
Rename log message
## https://docs.fluentbit.io/manual/pipeline/outputs
outputs: |
[OUTPUT]
Name stackdriver
Match kube.*
Tag_Prefix kube.var.log.containers.
Resource k8s_container
k8s_cluster_name MY_ROBOT
k8s_cluster_location MY_CLUSTER_LOCATION
severity_key severity
Workers 1
[OUTPUT]
Name stackdriver
Match k8s_node.*
Resource k8s_node
custom_k8s_regex ^(?<node_name>.*)$
k8s_cluster_name MY_ROBOT
k8s_cluster_location MY_CLUSTER_LOCATION
Workers 1
[OUTPUT]
Name stackdriver
Match kernel.*
Resource k8s_node
tag_prefix kernel.
custom_k8s_regex ^(?<node_name>.*)$
k8s_cluster_name MY_ROBOT
k8s_cluster_location MY_CLUSTER_LOCATION
Workers 1
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/fluent-bit.conf
subPath: fluent-bit.conf
- name: config
mountPath: /fluent-bit/etc/custom_parsers.conf
subPath: custom_parsers.conf
daemonSetVolumes:
- name: varlog
hostPath:
path: /var/log
- name: runlog
hostPath:
path: /run/log
- name: etcmachineid
hostPath:
path: /etc/machine-id
type: File
daemonSetVolumeMounts:
- name: varlog
mountPath: /var/log
- name: runlog
mountPath: /run/log
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
tolerations:
- key: "rtpc"
operator: "Exists"
effect: "NoSchedule"
================================================
FILE: src/app_charts/base/relay-dashboard.json
================================================
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (method) (rate(broker_requests[$__rate_interval]))",
"interval": "",
"legendFormat": "method=\"{{method}}\"",
"refId": "A"
}
],
"title": "Broker Requests by method",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 0
},
"id": 11,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (method) (rate(broker_responses{result=\"ok\"}[$__rate_interval]))",
"interval": "",
"legendFormat": "method=\"{{method}}\"",
"refId": "A"
}
],
"title": "# of successful Broker Responses by method",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 0
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (method,result) (rate(broker_responses{result!~\"(ok|timeout)\"}[$__rate_interval]))",
"interval": "",
"legendFormat": "method=\"{{method}}\",result=\"{{result}}\"",
"refId": "A"
}
],
"title": "# of bad Broker Responses by method",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 7
},
"id": 4,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (job) (rate(broker_requests[$__rate_interval]))",
"interval": "",
"legendFormat": "job=\"{{job}}\"",
"refId": "A"
}
],
"title": "Broker Requests by relay",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 7
},
"id": 14,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (job) (rate(broker_responses{result=\"ok\"}[$__rate_interval]))",
"interval": "",
"legendFormat": "job=\"{{job}}\"",
"refId": "A"
}
],
"title": "# of successful Broker Responses by relay",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 7
},
"id": 13,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (job,result) (rate(broker_responses{result!~\"(ok|timeout)\"}[$__rate_interval]))",
"interval": "",
"legendFormat": "job=\"{{job}}\",result=\"{{result}}\"",
"refId": "A"
}
],
"title": "# of bad Broker Responses by relay",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 14
},
"id": 12,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (backend) (rate(broker_requests[$__rate_interval]))",
"interval": "",
"legendFormat": "backend=\"{{backend}}\"",
"refId": "A"
}
],
"title": "Broker Requests by client",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 14
},
"id": 15,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (backend) (rate(broker_responses{result=\"ok\"}[$__rate_interval]))",
"interval": "",
"legendFormat": "backend=\"{{backend}}\"",
"refId": "A"
}
],
"title": "# of successful Broker Responses by client",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 14
},
"id": 16,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "8.0.5",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"exemplar": true,
"expr": "sum by (backend,result) (rate(broker_responses{result!~\"(ok|timeout)\"}[$__rate_interval]))",
"interval": "",
"legendFormat": "backend=\"{{backend}}\",result=\"{{result}}\"",
"refId": "A"
}
],
"title": "# of bad Broker Responses by client",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateOranges",
"exponent": 0.5,
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "Request duration buckets (in seconds) over time.",
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 16,
"x": 0,
"y": 21
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 6,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {
"yBuckets": {
"scale": {
"type": "linear"
}
}
},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#b4ff00",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Oranges",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"show": true,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "9.1.7",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"expr": "sum(rate(broker_responses_durations_bucket[$__interval])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{le}}",
"range": true,
"refId": "A"
}
],
"title": "Request duration (in seconds) histogram",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"format": "short",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 8,
"x": 16,
"y": 21
},
"id": 10,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"exemplar": true,
"expr": "count(sum by(backend) (rate(broker_requests{method=\"server_request\", job=\"kubernetes-relay-server\"}[$__rate_interval]) > 0))",
"interval": "",
"legendFormat": "number of backends",
"range": true,
"refId": "A"
}
],
"title": "Number of backends online",
"type": "timeseries"
},
{
"cards": {},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateOranges",
"exponent": 0.5,
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "Backend request duration buckets (in seconds) over time.",
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 16,
"x": 0,
"y": 33
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 17,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#b4ff00",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Oranges",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"show": true,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "9.1.7",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"expr": "sum(rate(broker_backend_responses_durations_bucket[$__interval])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{le}}",
"range": true,
"refId": "A"
}
],
"title": "Backend request duration (in seconds) histogram",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"format": "short",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
},
{
"cards": {},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateOranges",
"exponent": 0.5,
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "Relay overhead duration buckets (in seconds) over time.",
"fieldConfig": {
"defaults": {
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"scaleDistribution": {
"type": "linear"
}
}
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 16,
"x": 0,
"y": 45
},
"heatmap": {},
"hideZeroBuckets": false,
"highlightCards": true,
"id": 18,
"legend": {
"show": false
},
"maxDataPoints": 25,
"options": {
"calculate": false,
"calculation": {},
"cellGap": 2,
"cellValues": {},
"color": {
"exponent": 0.5,
"fill": "#b4ff00",
"mode": "scheme",
"reverse": false,
"scale": "exponential",
"scheme": "Oranges",
"steps": 128
},
"exemplars": {
"color": "rgba(255,0,255,0.7)"
},
"filterValues": {
"le": 1e-9
},
"legend": {
"show": false
},
"rowsFrame": {
"layout": "auto"
},
"showValue": "never",
"tooltip": {
"show": true,
"yHistogram": false
},
"yAxis": {
"axisPlacement": "left",
"reverse": false,
"unit": "s"
}
},
"pluginVersion": "9.1.7",
"reverseYBuckets": false,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"expr": "sum(rate(broker_overhead_durations_bucket[$__interval])) by (le)",
"format": "heatmap",
"interval": "",
"legendFormat": "{{le}}",
"range": true,
"refId": "A"
}
],
"title": "Relay overhead duration (in seconds) histogram",
"tooltip": {
"show": true,
"showHistogram": false
},
"type": "heatmap",
"xAxis": {
"show": true
},
"yAxis": {
"format": "short",
"logBase": 1,
"show": true
},
"yBucketBound": "auto"
}
],
"schemaVersion": 37,
"style": "dark",
"tags": [
"cloud-robotics"
],
"templating": {
"list": []
},
"time": {
"from": "now-30m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "HTTP Relay",
"uid": "p_rQjdS7k",
"version": 2,
"weekStart": ""
}
================================================
FILE: src/app_charts/base/robot/app-management.yaml
================================================
{{ if eq .Values.app_management "true" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: chart-assignment-controller
spec:
replicas: 1
selector:
matchLabels:
app: chart-assignment-controller
template:
metadata:
labels:
app: chart-assignment-controller
spec:
containers:
- name: chart-assignment-controller
image: {{ .Values.registry }}{{ .Values.images.chart_assignment_controller }}
args:
- --cloud-cluster=false
- --webhook-enabled={{ .Values.webhook.enabled }}
- --webhook-port=9876
- --cert-dir=/tls
- --trace-stackdriver-project-id={{ .Values.project }}
env:
- name: GOOGLE_CLOUD_PROJECT
value: {{ .Values.project }}
- name: ROBOT_NAME
value: "{{ .Values.robot.name }}"
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 10
httpGet:
port: 8080
path: /healthz
ports:
- name: webhook
containerPort: 9876
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
volumeMounts:
- name: tls
mountPath: /tls
- name: tmp
mountPath: /tmp
volumes:
- name: tls
secret:
secretName: chart-assignment-controller-tls
- name: tmp
emptyDir:
medium: Memory
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
---
# The chart assignment controller runs admission webhooks, which need to be served via TLS.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: chart-assignment-controller
spec:
secretName: chart-assignment-controller-tls
commonName: chart-assignment-controller.{{ .Release.Namespace }}.svc
dnsNames:
- chart-assignment-controller.{{ .Release.Namespace }}.svc
- chart-assignment-controller.{{ .Release.Namespace }}.svc.cluster.local
issuerRef:
kind: ClusterIssuer
name: cluster-authority
---
apiVersion: v1
kind: Service
metadata:
name: chart-assignment-controller
spec:
type: ClusterIP
ports:
- port: 443
targetPort: webhook
selector:
app: chart-assignment-controller
---
{{ if eq .Values.webhook.enabled "true" }}
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: chart-assignment-controller
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/chart-assignment-controller
webhooks:
- name: chartassignments.apps.cloudrobotics.com
admissionReviewVersions: ["v1"]
failurePolicy: Fail
clientConfig:
service:
namespace: {{ .Release.Namespace }}
name: chart-assignment-controller
path: /chartassignment/validate
rules:
- apiGroups:
- apps.cloudrobotics.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- chartassignments
sideEffects: None
{{ end }}
{{ end }}
================================================
FILE: src/app_charts/base/robot/cert-manager-certificates.yaml
================================================
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: selfsigned-ca
spec:
isCA: true
duration: 8760h # 1 year; this needs to be at least 3x the 90-day renewal period
commonName: selfsigned-ca
secretName: cluster-authority
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: selfsigned-issuer
kind: ClusterIssuer
group: cert-manager.io
================================================
FILE: src/app_charts/base/robot/cert-manager-issuers.yaml
================================================
# A self-signing issuer for cluster-internal services.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: cluster-authority
spec:
ca:
secretName: cluster-authority
================================================
FILE: src/app_charts/base/robot/cert-manager.yaml
================================================
{{ if eq .Values.app_management "true" }}
# This includes all resources expanded from the cert-manager chart using
# the values in ../cert-manager-robot.values.yaml.
# Some pseudo-variables that were inserted there are replaced with actual runtime values.
{{ .Files.Get "files/cert-manager-chart.robot.yaml" | replace "HELM-NAMESPACE" .Release.Namespace }}
{{ end }}
================================================
FILE: src/app_charts/base/robot/cr-syncer.yaml
================================================
{{ if eq .Values.cr_syncer "true" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: cr-syncer
spec:
replicas: 1
selector:
matchLabels:
app: cr-syncer
template:
metadata:
labels:
app: cr-syncer
spec:
containers:
- name: cr-syncer
image: {{ .Values.registry }}{{ .Values.images.cr_syncer }}
args:
- --listen-address=:8080
- --remote-server={{ .Values.domain }}
- --robot-name={{ .Values.robot.name }}
- --use-robot-jwt=true
- --verbose=false
ports:
- name: http
containerPort: 8080
livenessProbe:
httpGet:
path: /health
port: 8080
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 120
timeoutSeconds: 60
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
---
apiVersion: v1
kind: Service
metadata:
name: cr-syncer
labels:
app: cr-syncer
spec:
selector:
app: cr-syncer
ports:
- name: http
port: 80
targetPort: 8080
{{ end }}
================================================
FILE: src/app_charts/base/robot/fluent-bit.yaml
================================================
# !!! DO NOT EDIT THIS FILE !!!
# This file is autogenerated using src/app_charts/base/fluent-bit-helm.sh.
# See src/app_charts/base/README.md for update instructions.
{{ if and (eq .Values.robot_authentication "true") (eq .Values.fluentbit "true") }}
---
# Source: fluent-bit/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluent-bit
namespace: default
labels:
helm.sh/chart: fluent-bit-0.48.9
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
app.kubernetes.io/version: "3.2.8"
app.kubernetes.io/managed-by: Helm
---
# Source: fluent-bit/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit
namespace: default
labels:
helm.sh/chart: fluent-bit-0.48.9
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
app.kubernetes.io/version: "3.2.8"
app.kubernetes.io/managed-by: Helm
data:
custom_parsers.conf: |
# Merges multi-line Abseil logs. The regexes assume that any line which does not start
# with an Abseil log preamble is part of the previous log message. No assumptions on
# indentation or similar are made.
[MULTILINE_PARSER]
Name absl_logs_multiline
Type regex
Flush_timeout 1000
#
# Regex rules for multiline parsing
# ---------------------------------
#
# configuration hints:
#
# - first state always has the name: start_state
# - every field in the rule must be inside double quotes
#
# rules | state name | regex pattern | next state
# ------|---------------|---------------------------------------------------------------------------------------------
Rule "start_state" "/^((W|I|E|F))([0-9]{4}) ([^ ]+)\s+([-0-9]+) (\S+:\d+)] (.*)$/" "cont"
Rule "cont" "/^(?!(((W|I|E|F))([0-9]{4}) ([^ ]+)\s+([-0-9]+) (\S+:\d+)]))(.*)$/" "cont"
# A parser for Abseil log files: https://abseil.io/docs/cpp/guides/logging#prefix
[PARSER]
Name absl_logs
Format regex
Regex ^(?<severity>(W|I|E|F))([0-9]{4}) (?<time>[^ ]+)\s+(?<pid>[-0-9]+) (?<source_location>\S+:\d+)] (?<message>[\s\S]*)$
Time_Key time
Time_Format %H:%M:%S.%L
Time_Keep On
Types pid:integer
fluent-bit.conf: |
[SERVICE]
Daemon Off
Flush 1
Log_Level info
Parsers_File custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port 2020
Health_Check On
[INPUT]
Name tail
Path /var/log/containers/*.log
# Adding the 'absl_logs_multiline' does not work as intended.
# It must be specified as a dedicated filter (see below).
Multiline.Parser docker, cri, go, python
Tag {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag k8s_node.${MY_NODE_NAME}
Systemd_Filter _SYSTEMD_UNIT=containerd.service
Systemd_Filter _SYSTEMD_UNIT=etcd.service
Systemd_Filter _SYSTEMD_UNIT=kube-apiserver.service
Systemd_Filter _SYSTEMD_UNIT=kube-controller-manager.service
Systemd_Filter _SYSTEMD_UNIT=kube-scheduler.service
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Systemd_Filter _SYSTEMD_UNIT=sshd.service
Read_From_Tail On
[INPUT]
Name systemd
Tag kernel.${MY_NODE_NAME}
Systemd_Filter _TRANSPORT=kernel
Read_From_Tail On
[FILTER]
Name kubernetes
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
# Joins multiple Abseil logs into a single line.
[FILTER]
Name multiline
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Multiline.Key_content log
Multiline.Parser absl_logs_multiline
# Applies the absl_logs parser to the 'log' field.
[FILTER]
Name parser
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Key_Name log
Parser absl_logs
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_equals level info
Add severity INFO
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_equals level warning
Add severity WARNING
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_equals level warn
Add severity WARNING
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_equals level error
Add severity ERROR
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_matches log ^.*\[\sinfo\].*$
Add severity INFO
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_matches log ^.*\[\swarn\].*$
Add severity WARNING
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_matches log .*\[error\].*
Add severity ERROR
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_matches log .*(type="Info"|level=info).*
Add severity INFO
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_matches log .*(type="Warning"|level=warning).*
Add severity WARNING
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_matches log .*(type="Error"|level=error).*
Add severity ERROR
# We're setting the logName here and below to avoid a high cardinality field on stackdriver
# See https://github.com/fluent/fluent-bit/issues/9897
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_equals stream stderr
Add severity ERROR
Set logging.googleapis.com/logName stderr
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Condition Key_value_equals stream stdout
Add severity INFO
Set logging.googleapis.com/logName stdout
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Remove _p
[FILTER]
Name modify
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Rename log message
[OUTPUT]
Name stackdriver
Match {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.*
Tag_Prefix {{ empty .Values.log_prefix_subdomain | ternary "" (print .Values.log_prefix_subdomain "." ) -}} kube.var.log.containers.
Resource k8s_container
k8s_cluster_name {{ .Values.robot.name }}
k8s_cluster_location europe-west1-c
severity_key severity
Workers 1
[OUTPUT]
Name stackdriver
Match k8s_node.*
Resource k8s_node
custom_k8s_regex ^(?<node_name>.*)$
k8s_cluster_name {{ .Values.robot.name }}
k8s_cluster_location europe-west1-c
Workers 1
[OUTPUT]
Name stackdriver
Match kernel.*
Resource k8s_node
tag_prefix kernel.
custom_k8s_regex ^(?<node_name>.*)$
k8s_cluster_name {{ .Values.robot.name }}
k8s_cluster_location europe-west1-c
Workers 1
---
# Source: fluent-bit/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluent-bit
labels:
helm.sh/chart: fluent-bit-0.48.9
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
app.kubernetes.io/version: "3.2.8"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
verbs:
- get
- list
- watch
---
# Source: fluent-bit/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fluent-bit
labels:
helm.sh/chart: fluent-bit-0.48.9
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
app.kubernetes.io/version: "3.2.8"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fluent-bit
subjects:
- kind: ServiceAccount
name: fluent-bit
namespace: default
---
# Source: fluent-bit/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: fluent-bit
namespace: default
labels:
helm.sh/chart: fluent-bit-0.48.9
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
app.kubernetes.io/version: "3.2.8"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 2020
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
---
# Source: fluent-bit/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluent-bit
namespace: default
labels:
helm.sh/chart: fluent-bit-0.48.9
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
app.kubernetes.io/version: "3.2.8"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
template:
metadata:
labels:
app.kubernetes.io/name: fluent-bit
app.kubernetes.io/instance: fluent-bit
annotations:
checksum/config: 62f3260bfeab5914c8b602bc04f3e1aa7bd3fe9691a18590d746d3ddc09fd432
spec:
serviceAccountName: fluent-bit
hostNetwork: false
dnsPolicy: ClusterFirst
containers:
- name: fluent-bit
image: "cr.fluentbit.io/fluent/fluent-bit:3.2.8"
imagePullPolicy: IfNotPresent
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- /fluent-bit/bin/fluent-bit
args:
- --workdir=/fluent-bit/etc
- --config=/fluent-bit/etc/conf/fluent-bit.conf
ports:
- name: http
containerPort: 2020
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /api/v1/health
port: http
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
- mountPath: /var/log
name: varlog
- mountPath: /run/log
name: runlog
- mountPath: /etc/machine-id
name: etcmachineid
readOnly: true
volumes:
- name: config
configMap:
name: fluent-bit
- hostPath:
path: /var/log
name: varlog
- hostPath:
path: /run/log
name: runlog
- hostPath:
path: /etc/machine-id
type: File
name: etcmachineid
tolerations:
- effect: NoSchedule
key: rtpc
operator: Exists
{{ end }}
================================================
FILE: src/app_charts/base/robot/fluentd-gcp-addon.yaml
================================================
{{ if and (eq .Values.robot_authentication "true") (eq .Values.fluentd "true") }}
{{ .Files.Get "files/fluentd-gcp-configmap.yaml" }}
---
{{ .Files.Get "files/fluentd-gcp-ds.yaml" | replace "/var/lib/docker" .Values.docker_data_root }}
{{ end }}
================================================
FILE: src/app_charts/base/robot/fluentd-metrics.yaml
================================================
# Adds a Prometheus ServiceMonitor for scraping the fluentd metrics.
# By default, google-fluentd exports some Prometheus metrics on port 24231.
{{ if and (eq .Values.robot_authentication "true") (eq .Values.fluentd "true") }}
apiVersion: v1
kind: Service
metadata:
name: fluentd-metrics
labels:
app: fluentd-metrics
spec:
ports:
- port: 24231
name: metrics
selector:
k8s-app: fluentd-gcp
type: ClusterIP
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: fluentd-metrics
labels:
prometheus: kube-prometheus
spec:
endpoints:
- port: metrics
path: /metrics
interval: 10s
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
selector:
matchLabels:
app: fluentd-metrics
{{ end }}
================================================
FILE: src/app_charts/base/robot/gcr-credential-refresher.yaml
================================================
{{ if and (eq .Values.robot_authentication "true") (ne .Values.project "") (eq .Values.running_on_gke "false") }}
# Runs the gcr-credential-refresher binary with the robot's identity
# (robot-auth secret) to keep GCR image-pull credentials fresh.
# Not deployed on GKE, where node credentials are handled natively
# (see running_on_gke in values-robot.yaml).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gcr-credential-refresher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gcr-credential-refresher
  template:
    metadata:
      labels:
        app: gcr-credential-refresher
    spec:
      containers:
      - image: {{ .Values.registry }}{{ .Values.images.gcr_credential_refresher }}
        args:
        - --robot_id_file=/credentials/robot-id.json
        - --service_account={{ .Values.robot.defaultSAName }}
        name: gcr-credential-refresher
        resources:
          requests:
            cpu: "1m"
            memory: "50Mi"
          limits:
            cpu: "10m"
            memory: "200Mi"
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        volumeMounts:
        - mountPath: /credentials
          name: robot-id
      securityContext:
        # 65532 is the conventional "nonroot" UID/GID (e.g. distroless images).
        runAsNonRoot: true
        runAsUser: 65532
        runAsGroup: 65532
      volumes:
      # The robot's identity key, mounted read-only from the robot-auth secret.
      - name: robot-id
        secret:
          secretName: robot-auth
          items:
          - key: json
            path: robot-id.json
{{ end }}
================================================
FILE: src/app_charts/base/robot/metadata-server.yaml
================================================
{{ if and (eq .Values.robot_authentication "true") (ne .Values.project "") }}
# DaemonSet that serves the robot's GCP credentials to workloads on every
# node via a local metadata endpoint (bound to 127.0.0.1:8965).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: metadata-server
spec:
  selector:
    matchLabels:
      name: metadata-server
  template:
    metadata:
      labels:
        name: metadata-server
    spec:
      # Runs on the host network; NOTE(review): presumably required so it can
      # intercept metadata requests from pods on the node — confirm against
      # the metadata-server binary.
      hostNetwork: true
      volumes:
      - name: robot-id
        secret:
          secretName: robot-auth
          items:
          - key: json
            path: robot-id.json
      # Mounting /etc/ssl is necessary if ca-certificates is not installed in the container image.
      - name: ssl
        hostPath:
          path: /etc/ssl
      containers:
      - name: metadata-server
        image: {{ .Values.registry }}{{ .Values.images.metadata_server }}
        args:
        - --bind_ip=127.0.0.1
        - --port=8965
        - --robot_id_file=/credentials/robot-id.json
        - --source_cidr={{ .Values.pod_cidr }}
        - --running_on_gke={{ .Values.running_on_gke }}
        - --service_account={{ .Values.robot.defaultSAName }}
        securityContext:
          readOnlyRootFilesystem: true
          capabilities:
            drop:
            - all
            # NET_ADMIN/NET_RAW: NOTE(review): likely needed for host-level
            # traffic redirection — verify before removing.
            add:
            - NET_ADMIN
            - NET_RAW
        volumeMounts:
        - mountPath: /credentials
          name: robot-id
        - mountPath: /etc/ssl
          name: ssl
      # This daemon-set needs to run on all nodes for auth to work.
      tolerations:
      - operator: "Exists"
        effect: "NoSchedule"
      priorityClassName: system-cluster-critical
{{ end }}
================================================
FILE: src/app_charts/base/values-cloud.yaml
================================================
domain: "example.com"
ingress_ip: ""
project: "my-gcp-project"
region: "us-north1-a"
deploy_environment: "GCP"
registry: "gcr.io/my-gcp-project"
owner_email: "info@example.com"
# Setting app_management to "false" will remove layer 2 (app-rollout/chart-assignment-controller, etc).
app_management: "true"
# Setting onprem_federation to "false" will remove cloud policy for the cr-syncer (part of layer 1).
onprem_federation: "true"
oauth2_proxy:
cookie_secret: ""
client_id: ""
client_secret: ""
# Certificate provider
# This configures different cert-manager templates to emit certificates using the chosen authority.
certificate_provider: ""
# Certificate authority used to sign in-cluster certificates.
certificate_authority:
key: ""
crt: ""
================================================
FILE: src/app_charts/base/values-robot.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
deploy_environment: "GCP"
registry: "gcr.io/my-gcp-project"
# Setting app_management to "false" will remove layer 2 (app-rollout/chart-assignment-controller, etc).
app_management: "true"
# Setting cr_syncer to "false" will remove the cr-syncer.
cr_syncer: "true"
# Setting fluentd to "false" will remove the fluentd stackdriver integration.
fluentd: "true"
# Setting fluentbit to "true" will enable the fluentbit stackdriver integration.
fluentbit: "false"
# Setting log_prefix_subdomain to any non-empty string will prepend a subdomain to the
# `Tag_Prefix` configured on fluentbit (don't add a "." at the end to the subdomain)
log_prefix_subdomain: ""
# docker_data_root should match "data-root" in /etc/docker/daemon.json.
docker_data_root: "/var/lib/docker"
# Setting robot_authentication to "false" will remove layer 1 resources that are not
# needed when simulating a robot with a GKE cluster, such as the
# metadata-server or gcr-credential-refresher.
robot_authentication: "true"
# If running on GKE, skip setup steps that are unnecessary and will fail.
running_on_gke: "false"
robot:
name: ""
# Name of the default GCP Service Account used by robot when connecting to cloud.
defaultSAName: "robot-service"
webhook:
enabled: "true"
================================================
FILE: src/app_charts/k8s-relay/BUILD.bazel
================================================
load("//bazel:app.bzl", "app")
load("//bazel:app_chart.bzl", "app_chart")
app_chart(
name = "k8s-relay-cloud",
images = {"http-relay-server": "//src/go/cmd/http-relay-server:http-relay-server-image"},
values = "values-cloud.yaml",
)
# Robot-side chart: runs the http-relay-client that dials out to the
# cloud-side relay server.
app_chart(
    name = "k8s-relay-robot",
    images = {"http-relay-client": "//src/go/cmd/http-relay-client:http-relay-client-image"},
    # Trailing comma added for consistency with the k8s-relay-cloud rule
    # above and buildifier formatting conventions.
    values = "values-robot.yaml",
)
app(
name = "k8s-relay",
charts = [
":k8s-relay-cloud",
":k8s-relay-robot",
],
visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/k8s-relay/cloud/ingress.yaml
================================================
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-relay-client
  annotations:
    # Every request must first pass token verification by the token-vendor.
    nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify"
    nginx.ingress.kubernetes.io/client-body-buffer-size: "50m"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    # proxy-read-timeout sets how long nginx will allow a request to be idle
    # for. This is important for requests like `kubectl logs -f` where the logs
    # may be silent for some time.
    nginx.ingress.kubernetes.io/proxy-read-timeout: "86400"
    # With rewrite-target, ingress-nginx treats the path below as a regex;
    # $2 is the second capture group (everything after "client/").
    nginx.ingress.kubernetes.io/rewrite-target: /client/$2
spec:
  ingressClassName: nginx
  rules:
  - host: {{ .Values.domain }}
    http:
      paths:
      - path: /apis/core.kubernetes-relay/client($|/)(.*)
        pathType: Prefix
        backend:
          service:
            name: kubernetes-relay-server
            port:
              number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-relay-server
  annotations:
    nginx.ingress.kubernetes.io/client-body-buffer-size: "50m"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    nginx.ingress.kubernetes.io/rewrite-target: /server/$2
    # NOTE(review): ?robots=true presumably restricts this endpoint to robot
    # identities — confirm against the token-vendor API.
    nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
  ingressClassName: nginx
  rules:
  - host: {{ .Values.domain }}
    http:
      paths:
      - path: /apis/core.kubernetes-relay/server($|/)(.*)
        pathType: Prefix
        backend:
          service:
            name: kubernetes-relay-server
            port:
              number: 80
================================================
FILE: src/app_charts/k8s-relay/cloud/kubernetes-relay-server.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-relay-server
spec:
replicas: 1
selector:
matchLabels:
app: kubernetes-relay-server
template:
metadata:
labels:
app: kubernetes-relay-server
spec:
containers:
- name: kubernetes-relay-server
image: {{ .Values.registry }}{{ .Values.images.http_relay_server }}
args:
- --log_level=4 # WARN
- --port=8080
env:
# Enable tracebacks for debugging deadlocks or hanging requests.
- name: GOTRACEBACK
value: all
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 15
timeoutSeconds: 10
ports:
- name: http
containerPort: 8080
resources:
{{- toYaml .Values.kubernetes_relay_server.resources | nindent 10 }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
================================================
FILE: src/app_charts/k8s-relay/cloud/service-monitor.yaml
================================================
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: kubernetes-relay-server
labels:
prometheus: kube-prometheus
spec:
endpoints:
- port: http
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
{{- if .Values.prometheus.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- tpl (toYaml .Values.prometheus.serviceMonitor.metricRelabelings | nindent 6) . }}
{{- end }}
selector:
matchLabels:
app: kubernetes-relay-server
================================================
FILE: src/app_charts/k8s-relay/cloud/service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
name: kubernetes-relay-server
labels:
# This is used by the ServiceMonitor.
app: kubernetes-relay-server
spec:
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: kubernetes-relay-server
type: ClusterIP
================================================
FILE: src/app_charts/k8s-relay/robot/kubernetes-relay-client.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-relay-client
spec:
replicas: 1
selector:
matchLabels:
app: kubernetes-relay-client
template:
metadata:
labels:
app: kubernetes-relay-client
spec:
containers:
- name: kubernetes-relay-client
image: {{ .Values.registry }}{{ .Values.images.http_relay_client }}
args:
- --log_level=4 # WARN
- --backend_address=$(KUBERNETES_SERVICE_HOST):$(KUBERNETES_SERVICE_PORT)
- --backend_scheme=https
- --authentication_token_file=/var/run/secrets/kubernetes.io/serviceaccount/token
- --root_ca_file=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- --relay_address={{ .Values.domain }}
- --relay_prefix=/apis/core.kubernetes-relay
- --server_name={{ .Values.robot.name }}
- --disable_http2
resources:
{{- toYaml .Values.kubernetes_relay_client.resources | nindent 10 }}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
================================================
FILE: src/app_charts/k8s-relay/values-cloud.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
registry: "gcr.io/my-gcp-project"
robots: []
# MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
prometheus:
serviceMonitor:
metricRelabelings: []
kubernetes_relay_server:
resources:
requests:
memory: "16Mi"
cpu: "100m"
================================================
FILE: src/app_charts/k8s-relay/values-robot.yaml
================================================
kubernetes_relay_client:
resources:
requests:
memory: "10Mi"
cpu: "50m"
================================================
FILE: src/app_charts/mission-crd/BUILD.bazel
================================================
load("//bazel:app.bzl", "app")
load("//bazel:app_chart.bzl", "app_chart")
app_chart(
name = "mission-crd-robot",
extra_templates = ["mission_crd.yaml"],
values = "values.yaml",
)
app_chart(
name = "mission-crd-cloud",
extra_templates = ["mission_crd.yaml"],
values = "values.yaml",
)
app(
name = "mission-crd",
charts = [
":mission-crd-cloud",
":mission-crd-robot",
],
visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/mission-crd/mission_crd.yaml
================================================
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cr-syncer.cloudrobotics.com/filter-by-robot-name: "true"
cr-syncer.cloudrobotics.com/spec-source: cloud
name: missions.mission.cloudrobotics.com
spec:
group: mission.cloudrobotics.com
names:
kind: Mission
plural: missions
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
spec:
properties:
actions:
items:
properties:
charge:
properties:
charger_name:
type: string
target_battery_percent:
description: |-
If actually performing the charge action, charge until this level is
reached.
format: int64
type: integer
threshold_battery_percent:
description: |-
Only perform the charge action if battery level is lower than this
threshold, otherwise do nothing.
format: int64
type: integer
type: object
get_trolley:
properties:
dock_name:
description: |-
Name of dock to get trolley from, eg "1". This should correspond to
the name of the mission created by ROEQ's Create_docking_station.exe, eg
"ROEQ_Get cart 1".
type: string
type: object
move_to_named_position:
properties:
target_name:
description: String id of target position as created in
MiR's web frontend.
type: string
type: object
return_trolley:
properties:
dock_name:
description: |-
Name of dock to return trolley to, eg "1". This should correspond to
the name of the mission created by ROEQ's Create_docking_station.exe, eg
"ROEQ_Return cart 1".
type: string
type: object
type: object
type: array
time_out_sec:
format: float
type: number
type: object
status:
properties:
active_action:
properties:
id:
title: The ID of the currently executed action
type: string
index:
format: int64
title: The index of the currently executed action
type: integer
status:
description: |2-
- DEFAULT: Used if no other specific status applies.
- DOCKING: Robot is currently docking.
- MOVING: Robot is moving.
- TIMEOUT: Mission duration exceeded MissionSpec.time_out_sec. Can only be set if
MissionStatus.status is FAILED.
enum:
- DEFAULT
- DOCKING
- MOVING
- TIMEOUT
type: string
title: Information about the currently executed action within a mission
type: object
message:
type: string
queue_entry_id:
description: Links the goal to the entry in the MiR's mission queue.
format: int64
type: string
status:
description: |-
The various states of the mission execution.
- CREATED: initial state
- ACCEPTED: mission has been validated on
- RUNNING: active state (processing)
- SUCCEEDED: terminal states
enum:
- CREATED
- ACCEPTED
- RUNNING
- SUCCEEDED
- CANCELED
- FAILED
type: string
time_of_actuation:
format: date-time
type: string
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions:
- v1alpha1
================================================
FILE: src/app_charts/mission-crd/values.yaml
================================================
project: "my-gcp-project"
registry: "gcr.io/my-gcp-project"
robot:
name: ""
crd_spec_source: "cloud"
================================================
FILE: src/app_charts/platform-apps/BUILD.bazel
================================================
load("//bazel:app_chart.bzl", "app_chart")
app_chart(
name = "platform-apps-cloud",
extra_templates = [
"//src/app_charts:app_resources",
],
values = "values.yaml",
visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/platform-apps/values.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
deploy_environment: "GCP"
registry: "gcr.io/my-gcp-project"
================================================
FILE: src/app_charts/prometheus/BUILD.bazel
================================================
load("//bazel:app.bzl", "app")
load("//bazel:app_chart.bzl", "app_chart")
load("//bazel:build_rules/helm_template.bzl", "helm_template")
helm_template(
name = "prometheus-operator-chart.cloud",
chart = "//third_party/kube-prometheus-stack:kube-prometheus-stack-72.9.1.tgz",
helm_version = 3,
# The namespace will later be replaced with the actual one.
namespace = "HELM-NAMESPACE",
# Pick a short release name as it will be used as a prefix for a lot of resources.
release_name = "prom",
values = "prometheus-cloud.values.yaml",
)
helm_template(
name = "prometheus-operator-chart.robot",
chart = "//third_party/kube-prometheus-stack:kube-prometheus-stack-72.9.1.tgz",
helm_version = 3,
# The namespace will later be replaced with the actual one.
namespace = "HELM-NAMESPACE",
# Pick a short release name as it will be used as a prefix for a lot of resources.
release_name = "prom",
values = "prometheus-robot.values.yaml",
)
app_chart(
name = "prometheus-cloud",
extra_templates = [
"//third_party/kube-prometheus-stack:00-crds.yaml",
],
files = [
":prometheus-operator-chart.cloud",
],
images = {
"http-relay-server": "//src/go/cmd/http-relay-server:http-relay-server-image",
},
values = "values-cloud.yaml",
)
app_chart(
name = "prometheus-robot",
extra_templates = [
"//third_party/kube-prometheus-stack:00-crds.yaml",
],
files = [
":prometheus-operator-chart.robot",
],
images = {
"http-relay-client": "//src/go/cmd/http-relay-client:http-relay-client-image",
"hw-exporter": "//src/go/cmd/hw-exporter:hw-exporter-image",
},
)
app(
name = "prometheus",
charts = [
":prometheus-cloud",
":prometheus-robot",
],
visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/prometheus/README.md
================================================
# Prometheus App
The Prometheus app uses the upstream Helm chart
`prometheus-community/kube-prometheus-stack` (vendored in `third_party/`) as a
basis for cloud and robot chart alike.
The following caveats exist that explain some of the choices made:
* The prometheus-operator chart is vendored in `third_party/` as Helm
packaging does not generate reproducible artifacts that can be downloaded and
verified via file checksums at build time.
* Helm cannot template `values.yaml` files themselves. For example, we need to
provide an external URL for Prometheus, which must include the project domain
name. But it is only provided at deploy time as `.Values.domain`.
For this reason, we expand the prometheus-operator chart at build time
and insert pseudo-variables, which are string-replaced by Helm at deploy time
during template processing (see `cloud/prometheus-operator.yaml`).
================================================
FILE: src/app_charts/prometheus/cloud/app.yaml
================================================
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
name: "prometheus"
labels:
app.kubernetes.io/name: {{ .Chart.Name }}
app.kubernetes.io/version: {{ .Chart.Version }}
annotations:
kubernetes-engine.cloud.google.com/icon: >-
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMAAAADACAYAAABS3GwHAAAACXBIWXMAAAsSAAALEgHS3X78AAAVI0lEQVR42u1dCZQVxRX9w8CwySqIBBFxj4NM9x9BEdSAosBMdw+gbDFRQYyiOVFxQcWgGARxIwrqcd+TqBBNIAuQGNfEuEQ4Eaa7B7dIUFwBRQwy5L3u+p72+5f+/3f/qeX1OXfOEWequ17dV/Wq6i2JBD2RPY5Rm3CsZIVj6W0cU+8M2BdQCzgBcDJgKuA8wEWAywCzGS5j/3Ye+52T2d/UsjawLWgT29ZI0PS0MNHrjkrYplYJpNwbMBRwFuA6wBOAVwAbAdsBzYDdJaKZtbWRtb0McD1757H4DbapVzaNTdLA0BPP45p6BRBtT8AowBzAU4C3AF9FQPBSgd/wDmA54CrAGNv71mQFjRw9xc3wlkf4HoCxgMWANYCvOSB7WOC3rmXfPh7Q0zU0Ugh6cpAezAjAQMClgOeY2bFbEnwJeB5wuddHQ6+kEacHSY8z/eGAeYA3BJvli8UuwHrAtYAaMJVaERPUsueR+H0AM5lp06wA6XNtsNFUuhiwD6wMZCZJPNvj0eRIwFLJzJsozSQ8YToJZUWMkYf4nQAzmInTTEQPtSqsA/wUZUcMEpH0Y5IJdkaPx4KbidRF40PANYDe68fWErF4f96YUI3E/x5gIeAzInBk2AK4AfdOTQ01RDROTZ3u7DTnM4FMjQ2AfwikCCjb+Y6ldyfG8UP8DoCfseVaFCK95m3IjdpW7FZZRNPofJQ9MbCliG/pSJ56tmETaYN5B6A9U95qdi4vqmmE9wn1tqnTXUKZZ/3+gCcFI08z25tUBvpxrSQXazgW/YmZ8RO/CnAhYKuARHnYtZLfkN/2L+RelmijvA0vF20z2ZaYGg/50U/nBUHJsS79TN0xtfaSnlThGA0kxkZGfK0123B9LighdgKGZVDoXpy4U8eBz9lK3ZoYXNqs3xewUnAy3OeYNZn61hvwP8nvD3Ds+hKTC3xsy7OP6wCbJJgJ+2dR7i4Cr2qF4H1/LCmUM+ysj05rcyWZHe9uMjIPvFufbMUiuVS4Sf4fG1NysstDfgw7XCGRU1nWzaBb5/krLVfMpeIPGGlHTM9M/mp2sSLLYL/kGLnjclmElmp+RTjG1cT4bxNhpIRemxeF6Pcghb1MTyTi+yGJpwF2SBh2eGjezb7hpVb5j6JKgGN+OmbcUNSXR2vFAtB3Sji4b4bd8MHv3aKwmzWO/SxHNV8i1/ROQBYI7giWC4+59VrYVXBIjJFq7woQ/okcWGCbWitVzJ7WCsx6swuUxxsxfMO/AT8SSGa3SH9zzAZ7iQKxuVMKlMusiN//DvOYXSLYsfFt6Poip9ljeWbPLYoEpo8oUAH2ZVkZotpcHm37ib4aBbw7uVW6PYHj2/wLFdrcDS5EPo2mdym2LKJ3Xx7wNRL1dG2hNErAjjovk3jDmwl6EXIaHcF7X4WZvx1rb5jgx8iXu4YECX6hI9MUSTkYxICC5WQkW7Pj01IS4h4fkPsUwWWI/ZkmOvlHC+TzvoYRsDmC2eugIuV1RQnvXRFMiQ7/fa4EEwlyZ7So5McIro8FEvbjgLZ4O1nid6PnY58iZdavyM0wKt3RaW3NlGQ1/Vi4CDP44L0ArmCC3hzI1oAZo98rsp0vAHsUeUFYUWSalOc2pCW2ZVF0spiUyKW9BLnl1TBwfZWgGy8tQKAky4hWaDubSvF7h781i3jnpAztTJdsX7XKsfQqzsnvnfiIfNw5KY1EZxa5lyj69MK19PYFOshtyrTiwL9ZEh4uLOTaeY6V4xHZue2CNIXG+4vVhQZ9RCDH6wt435IsbdRKeOmI3BrHK/n3B3wkuIDnZOhXssDwzCUly9LQB4QkL/7OMVnGo4ekdRAwlmB/3siPdv+zEgj3qvS+NZlaBTshCtvGhRFdHr4WztszcyIqx/Bu3xsdOe9ZgG
vJKp4UYLYkgr04S/+GFnCTPSYimV4U4l135WnjUfK4jZ/8gyN05GppnJGlj+hY9krINvaLSK79Qlwijs3Txk8kL+k0uGXJb3gnFq9JJNQROch0QUj7NJLUH65vej2Xx+tzzzwKcLDkPlivpe5uWmr2nyNZ7pp+eWbkfCdcT0cs35/leNfLeZXI8C7W1kmsABkPLspF/sMkMn12s5vfNnn6/M88bSyIWMb75TiBujFkG/MkVwDk4GHl9u9Hm/gvkgny9yHINDdPGw3RytlLHJDNxBwXUgE0BbxxVwfrLpRj9p8s4SXLJSH6PTyPTb53DLJelOVCqF/Iv68MsXLJkIVvcrnI30XSnJa1IfrejRWCyBKMkmwVg7wnZon5bVNAG2cpEIPxNnKTNr7FYaNj5M9I4PibymxVXebHJO9DSnW3YJPWBwoowZy4yd/HEbM8UT7cU4AM7s6yBB8bk8zbMhfr4PuuK6KdaxRQAOTm92IhP8vZf5ukgqsv8WgSE1DFUhvLrtcSGfIGnVpkjIYKxcNvQ67GMRMdLKlzFQbCdCpADidlaGNxzCvv8rTVJllkOyqsAtuLDUnNdeyZbemXAfd/MuHwQm3y9GPFI2NWgCVpA9yjyHa6efsd+ZXgLjfKijSoUZJdegVn0xMLlEVX59uljdbEncMmLXPcu24JZ96S+wcFL8cOjGsGkglou7crzEfHS+8YjNg6uwwnbz8OvO9vEWyqX1dACRZHJfxejrzF3G4uUiZrAuGIXcqgACcGTbYI2hujQIpK5GwvOvfPbf7UFimTVJjkrDJdPg4MfPfVJZ8sjR2I9xkr6V4gv+A7OvJWMFnrWMWl3oO/XYo3j7DR6lwmBegdcGueGlGbRzlyFicJArnbsRQhTZFYOJeWIJcHyuZ74r+vUyBFy6go2mSu0k8psApMKW6Z9DM6PyOpUHaUErkFf1tXzvQcjr/xTiXqGhhhu0cqsAo84xbjo4UJXiV2o12JEVcJQR7HT8/yOhuP3lG165o1KuwFvi4mWXFC8nz+P04I9GwYOwDH4094CWabNW0jVq7RCphB1xUoFK1NCbkxeceWfLG0nK4C96HbRtTtuoYXL7BecgV4z7YKiNWGPzhBYmE8kRDwge++Fp3iYmr7EgVWgeMLEcg9EgviFEEVYAZmiYip7X0d+YqVF5RDKSgMTHXyvrz+4slOgipAHXqFxtF2U72XiW6V5AqwCUz79qqbP8sSgj4sC8dDMa8wZAbBL/1SYgGcIbACdMas0TG2f2CBSYBFxKLcQrC8Cxdb0s5jmsF9BFYAvAs4Pa72G/32GyVXgMachbjhFw6VOJXeq04MWRvKrAQHxNz+nZIrAHL7kFwCOFfizt+YEPxpNI+IW8HOVGAfMCOXAJZJ3HEjQU8+BdAVUIClWex/rVLi3DE7YkuXIZcCdFLgPuD9jCGl8D+qJe50k2vpFUTxUEqwQYFV4DDV7L+lRO3QCrBaAQWYlqnjd0nc4blE7dAKcK8CCnBneqcr/GNCaTt8KlE7tAIsUEABXnWMQDwI2/xslbjDxxC1QyvATEVyiO4R7LQmcZqM5rgvkCRTgOkKKAByoibY6ckSd3anK2AATAsqwGQFFAAxKdhpmZOm7ogrc7OkCjBOEQXwD0ZcP+35r6TOE9mgtSJqh1YAUxEF+JVtVX9zAiRzDakvN1hJUoDwClCviAIg5ytSAfD/ldkEajQ1MoHIBEoHcr5NKt23zP4fO4vNp6+oApymiAIg57umKr80S37kdRBRO7QCXKCIAvi8gB/DKBaUnoAC3KiIAiCGYYfHK9DRs4naoRVgqUIKME6Vm79bidqhFWCdQgpwJnb4YgU6+tIrwwcQu/OTv4sCATFBXKRK6cxtjqF3I4rnVYDjFCK/fxsMP25QpLOjiOJ5FWCOYgpwg8zV39OxmCie/XGNGuTCi4opwBIVcsGk8DY5xeWc/fs6ctaCzh0ZpkgIXOri4wSielYFOE8x8iPuUUkBEL8mqm
cgv6G1ktwhMhvulT0Y/ruu0QLnB41x9j9S4pSYuWsGwI/bFev0dUT57yjAIwqSH3E7dv5mxTr9qRNhpUUJyH8wy5ytogLchAKYx+Fm9cuYPVQXNY2vpaNPyzv6fLBMY7qNQ6/jeSiAWZx9FNZ1HQIYzKqW3A94I+LiDdszpsdTb/YfHENRjGaWduQFvHsBTAUkWeJd3opyz0IhnM17JjfX1FOBOyOYwj4J2OiUVsh7ta1wrDBGQwGej2Cy+hDwLDOlf+TnmNWqbFNLf9+FXHoJw4+JHH6YDQKsyjWAth/KifbrFPT2ZMd4nxf4nul07l8Q2TGMEIvpzWe8OTjM5SIWJuH0lnligs2qPG5QCrq0cqzaCrZKHAu4lK0S7+c53vs0Z8UQuTe+W/NUUsEVdjngahYnvB+gyqkbVMz7qjk0fxDDUx/HY0jkb5qsI0ocaG/mweJvPwTcAvg74LM0pXgZ0FEh8ndkfU7foL6CvjGYRxUz6W0wkxURvpPHKDPsdzV+XA9OtRP90vvGQIA92IZsGlMKtINvbLKS0tcPcCyvEN4iwOuABwA/ZSGxXV1jYFwKh3lnP+I2WQL8aMc2MjyaQQviJsWGuhrYTyTbwYZYfgUwk5WAzq5ZvmIhDr915zZ7+xfb0tB2XsvpR34C6EnXVYLeM/iTq8Mpt9a6aOY5hnfE+CTHt3XXNMVcHZGe2Gb/Mzjm1W9dS/vmQ3mOCsOTmj5EJ7Ee29Q7cDz7I64Pauo03r323AaNWCXW7H8+55yaGvzYIQKkNyQ7SBzy92SbTJ45NSR4PNZDgHC4F21MZkoP3+Q3kiLEmSPX9wwqAJ4ENQrgvgrLag2xjO/Zf0gMDnZRo9FOrxuN4YICKMAWFV0XBCJ/e8C/RCiOkenjRckK/Ixr6q2Jbnw9600vtuBqQTh0fralS5RInivIFOJu9h8qgOmTwlEZNi/erd02gYLbhxHtuCF/d87P/L9VI9g1tXbf6USjMRA78heBVoENVPml5R/X0iphHB4TiDers3oZ+6aFUEHNv3NMjfYDLfQ0+UeeswTjzOW5lrIjBYzsn20bFOBe7mdd/YBUSdWdgvFlcC4FwH3AJsE6hBsvgyhZdru/lvlpiVYZsl2OTnlL2kMCrgKfUJaHspL/IMB7AvLkQadBz9u5iQJ2bDe7yT7ANfWehPiAF5ECnfikY0LYI61tgnZwJ8tyRogPOwXlBiYB6J5fAfxCCU8pmiqPIC8w6Cu0jTeFBEaQDJML2eR0ypM3hkAQy/yx9E6F7vQfIMERJMH9xRx1DSfBESTBD4pRAMy9uZ6ERxAc65xiIwnhD2eSAAmCY2YpN369BNoMY77P0x1TO5kQK053xKknhhGEvUpQgKRIdYS/ti29GzkqxOwGYWjdS6zLUE7cmdf1IcQqUC1Ih0kBSAHS6xlUR+D4pIlyM0wKQAoQxFOuEVHYLEuhvYsUgB5BFGBXpCGztp9X/s+kAPQIogDI1Wjrv0GDx3G+CpACkAKkZv9jI+84FlWAhleQApACcK4AK1wzpmInjl/v9WtSAFIAjk9+krF13q7Xy1VZnBSAFKAYPODGnTCNlcvcSgpACsBhxNd+sQugaeRQVIIrSQFIAXhLj/PmCUPLJAS/1KhNCkAKwAmAi8k9yisIUx/t8FVgmxRATQVADo4uuyDcBq+86qOkAKQALYxHG40WKnIOL+/NUSY5UgD1FAC517tlBeJnkGjm5AZwBuA0QqyYwYlHAHJuSovPCLbhmUKPUOQRocx4xGkp0+c7+wHTqzL5Dg0KoUx42wlWeeTCNvSzSHxFg0OIGcix4dxtjlw/cOZKGiBCzLjSNnVOTwhMrQ3nHqMEsbHC4b1YOnzgXo5fv4sGjBB1Tbi9xDgr9t2mtwgqaDxe2yEpmgUdky2xujnHpASnOOLUjQ1iLZhyPeQE9k248UAOnSLejaGpi1hBEOFKe4sLfRNwPGY5vG56854MWUm8JFsimMA/c8xklWzkt029yu
+bUGOxxK7XK0SfdVDwjwu2B+gv4ezfX7A9wOO2qVXJIvyOgJUCCX+ChAowQSD5r0LOyDYAWHXmWUEG4G4JFeBuQWT/HKCzrJuwboDnBRiE/8o0A7EVWIQC6C84YSo5Cj4Y3QVZCSZKJPNJgsz8cpM/zRxaxfmAvARoLYGsWwP+ybmsV0tr9uRZlp/g/DRoggRynsj56c9STLKQUPGBjrcF3M6533kXgeXbhfWBV/negRxIqPy4poaZp68A7OR0kO6JPNNwWcifRLney6lMcaxnO5Z4co3nltLSUxs1HjPOYfzr9EYrKRD5a1Ge0znN5o1jPBnIT8QPPk+fMwIHbRDgLQ4HbTtglECmzyj2zTyalIN395lOhM8xeL04vTXeBhjpNAzmVnZNY7yVdCT7Vt7kh2O6NzE8nBKg/9A1HLpTf46pOJrqa7hz0LLNmgrPtPC/kTd7H8eyiphdmBIgTgS8x+GALgAblpsBdQxvwpjP4UHCRjTHbEMjQhd/SuSZRE9weJb9ImCg09Byg2tbNThJHM6+hbc7lKUlFammJ2DbWt6R3hmADzkb6C8ACwE9WmCF3JO9+wvOZPIRYKptaXTEGe2Ae2lX9gUs43CDtxkwpxybPHZI8HPABxzK4bc4Rq9PGkGEjVERcLM3jtMbzu0sTSQeQ3aIkPQdACcBHuJwxt/Njq7H22ayghhaPhOgK9v4bef0thNn6McA5wBq0dnLGZufIPg73u/6mTXOBvya09k+pfDzbUvvSoxsib1BQxIV4VC2Sea5hnEzuwG12Xn4w4DFgBsYFrN/W8V+ZyvnDmy72Cb30Ka4i9LRE2Y18GbNYwBPC5z7RpR46WewELVr6GTucGgWVQLGsMgiUoRoiY9HrXWuqVUS03i/O7A8RcCN6F9JESKZ8XFSIeILtyL4xTuOYhvJL4nQobGDbeCHNDaQqSPLHQLmxZkLeJcInhXodvILwP6ukSTiSHdqNG4QKkJ7QAPgSSro8U3Bid+xu5UOTeMHEVHUMI/0VCr3GSxLxVeKkR77fB7eXtsmBafQMaqp92WXVss5jUyLIp7hj0zh+7mwP6KRpyeTMqRumfHk4ybAGo7jlXPWTWbp0BcB6v0+kVsyPYUqhKVXMM9LEzCP5bDZzFm19F3sm/7qxSr439oTQLM8PdE+tr93wFpoBwIswGXMSe0FllLxq5juHppZ2/iOfzDXiSvYhv4g10xW4bfRQ0/5V4k673QpYftFAjH/6WGA41mI4vns+PVWwP3sfP33zB5fyfBH9m+Psd+5lf3NBYBTWZzv97FtfIcLM7s7lsgexfN/McuIGxiL1x0AAAAASUVORK5CYII=
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ .Chart.Name }}
componentKinds:
- group: core
kind: Service
- group: apps
kind: Deployment
- group: apps
kind: Ingress
- group: apps
kind: StatefulSet
descriptor:
type: "prometheus"
version: {{ .Chart.Version }}
description: "Prometheus provides metrics and alerting"
keywords:
- "dashboard"
- "metric"
links:
- description: Prometheus
url: "https://{{ .Values.domain }}/prometheus/"
- description: Grafana Dashboard
url: "https://{{ .Values.domain }}/grafana/"
================================================
FILE: src/app_charts/prometheus/cloud/base-alerts.yaml
================================================
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
app.kubernetes.io/name: {{ .Chart.Name }}
prometheus: kube-prometheus
role: alert-rules
name: base-alerts
spec:
groups:
- name: base-alerts.rules
rules:
- alert: CloudRoboticsPodFrequentlyRestarting
expr: increase(kube_pod_container_status_restarts_total[1h]) > 5
for: 10m
labels:
severity: warning
annotations:
description: Pod {{`{{$labels.namespace}}`}}/{{`{{$labels.pod}}`}} was restarted {{`{{$value}}`}}
times within the last hour
logs: "https://console.cloud.google.com/logs/viewer?project={{ .Values.project }}&resource=container&logName=projects%2F{{ .Values.project }}%2Flogs%2F{{`{{$labels.container}}`}}&interval=PT6H"
summary: Pod is restarting frequently
================================================
FILE: src/app_charts/prometheus/cloud/federation-service-monitor.yaml
================================================
{{ range .Values.robots }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: prometheus-federation-{{ .name }}
labels:
prometheus: kube-prometheus
spec:
endpoints:
- port: http
path: /client/{{ .name }}/federate
params:
'match[]':
# Identified via Prometheus query: topk(10, count by (__name__)({__name__=~".+"}))
# As of 2021-07-21 this config reduces the number of time series scraped by approx 1/4
- '{__name__=~".+", __name__!~"apiserver_request_duration_seconds_bucket|apiserver_request_slo_duration_seconds_bucket|etcd_request_duration_seconds_bucket|apiserver_response_sizes_bucket"}'
honorLabels: true
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
# These are the labels below on the federation service
targetLabels:
- robot
- cluster
selector:
matchLabels:
app: prometheus-federation
robot: "{{ .name }}"
---
apiVersion: v1
kind: Service
metadata:
name: prometheus-federation-{{ .name }}
labels:
app: prometheus-federation
robot: "{{ .name }}"
cluster: "{{ .name }}"
spec:
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: prometheus-relay-server
type: ClusterIP
---
{{ end }}
================================================
FILE: src/app_charts/prometheus/cloud/grafana-ingress.yaml
================================================
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
labels:
app.kubernetes.io/name: {{ .Chart.Name }}
annotations:
nginx.ingress.kubernetes.io/auth-url: "{{ tpl .Values.gf_ingress_auth_url . }}"
nginx.ingress.kubernetes.io/auth-signin: "{{ tpl .Values.gf_ingress_auth_signin . }}"
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/rewrite-target: /$2
# HACK: oauth2-proxy will return 403, but nginx-ingress-controller only handles
# 401 with an error page.
nginx.ingress.kubernetes.io/configuration-snippet: |
error_page 403 = {{ tpl .Values.gf_ingress_error_page_403 . }};
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: /grafana($|/)(.*)
pathType: Prefix
backend:
service:
name: prom-grafana
port:
number: 80
================================================
FILE: src/app_charts/prometheus/cloud/prometheus-ingress.yaml
================================================
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: prometheus
labels:
app.kubernetes.io/name: {{ .Chart.Name }}
annotations:
nginx.ingress.kubernetes.io/auth-url: "{{ tpl .Values.prom_ingress_auth_url . }}"
nginx.ingress.kubernetes.io/auth-signin: "{{ tpl .Values.prom_ingress_auth_signin . }}"
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/configuration-snippet: |
error_page 403 = {{ tpl .Values.prom_ingress_error_page_403 . }};
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: /prometheus($|/)(.*)
pathType: Prefix
backend:
service:
name: kube-prometheus
port:
number: 9090
================================================
FILE: src/app_charts/prometheus/cloud/prometheus-operator.yaml
================================================
# This includes all resources expanded from the prometheus-operator chart using
# the values in ../prometheus-cloud.values.yaml.
# Some pseudo-variables that were inserted there are replaced with actual runtime values.
# TODO(rodrigoq): This severely limits how the end-user can customize the
# prometheus deployment. How could we let them override prometheus-cloud.values.yaml?
# NOTE: The order here is important. The domain and project might be part of other values and
# need to be replaced last.
{{- $data := .Files.Get "files/prometheus-operator-chart.cloud.yaml" -}}
# Pre-process variables
{{- $data = $data | replace "${CR_GF_SERVER_DOMAIN}" .Values.gf_server_domain | replace "${CR_GF_SERVER_ROOT_URL}" .Values.gf_server_root_url | replace "${CR_GF_CSRF_TRUSTED_ORIGINS}" .Values.gf_csrf_trusted_origins | replace "${CR_GF_SMTP_ENABLED}" .Values.gf_smtp_enabled | replace "${CR_GF_SMTP_HOST}" .Values.gf_smtp_host | replace "${CR_GF_SMTP_USER}" .Values.gf_smtp_user | replace "${CR_GF_SMTP_PASSWORD}" .Values.gf_smtp_password | replace "${CR_GF_SMTP_FROM_ADDRESS}" .Values.gf_smtp_from_address | replace "${CR_GF_SMTP_FROM_NAME}" .Values.gf_smtp_from_name | replace "${CR_GF_SMTP_SKIP_VERIFY}" .Values.gf_smtp_skip_verify | replace "${CR_GF_INGRESS_AUTH_URL}" .Values.gf_ingress_auth_url | replace "${CR_GF_INGRESS_AUTH_SIGNIN}" .Values.gf_ingress_auth_signin | replace "${CR_GF_INGRESS_ERROR_PAGE_403}" .Values.gf_ingress_error_page_403 | replace "${CR_PROM_INGRESS_AUTH_URL}" .Values.prom_ingress_auth_url | replace "${CR_PROM_INGRESS_AUTH_SIGNIN}" .Values.prom_ingress_auth_signin | replace "${CR_PROM_INGRESS_ERROR_PAGE_403}" .Values.prom_ingress_error_page_403 | replace "HELM-NAMESPACE" .Release.Namespace | replace "${LIMITS_MEMORY}" .Values.limits.memory | replace "${LIMITS_CPU}" .Values.limits.cpu | replace "${REQUESTS_STORAGE}" .Values.requests.storage | replace "${RETENTION_TIME}" .Values.retention.time | replace "${RETENTION_SIZE}" .Values.retention.size | replace "${EXTERNAL_URL}" .Values.prom_external_url | replace "${CLOUD_ROBOTICS_DOMAIN}" .Values.domain | replace "${GCP_PROJECT_ID}" .Values.project -}}
# Inject the nodeSelector as a pre-formatted YAML block to allow users to define multiple selectors in a dict within values-cloud.yaml while maintaining valid indentation in the output.
{{- $prometheusNodeSelectorMap := .Values.prometheus.prometheusSpec.nodeSelector -}}
{{- if $prometheusNodeSelectorMap }}
{{- $prometheusNodeSelectorRawYaml := toYaml $prometheusNodeSelectorMap -}}
{{- $formattedPrometheusNodeSelectorObject := $prometheusNodeSelectorRawYaml | nindent 6 -}}
{{- $data = $data | replace "${CR_PROMETHEUS_NODE_SELECTOR_OBJECT}" $formattedPrometheusNodeSelectorObject -}}
{{- else }}
{{- $data = $data | replace "${CR_PROMETHEUS_NODE_SELECTOR_OBJECT}" " {}" -}}
{{- end }}
# Inject the tolerations as a pre-formatted YAML block to support multiple key-value pairs in a list within values-cloud.yaml
{{- $prometheusTolerationsList := .Values.prometheus.prometheusSpec.tolerations -}}
{{- if $prometheusTolerationsList }}
{{- $prometheusTolerationsRawYaml := toYaml $prometheusTolerationsList -}}
{{- $formattedTolerationsObject := $prometheusTolerationsRawYaml | nindent 6 -}}
{{- $data = $data | replace "${CR_PROMETHEUS_TOLERATIONS_OBJECT}" $formattedTolerationsObject -}}
{{- else }}
{{- $data = $data | replace "${CR_PROMETHEUS_TOLERATIONS_OBJECT}" " []" -}}
{{- end }}
{{ $data }}
================================================
FILE: src/app_charts/prometheus/cloud/prometheus-relay.yaml
================================================
# Cloud-side endpoint of the HTTP relay that lets the cloud Prometheus reach
# metrics on robots: each robot runs a relay client (see
# robot/prometheus-relay-client.yaml) that connects out to this server.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-relay-server
  labels:
    app.kubernetes.io/name: {{ .Chart.Name }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-relay-server
  template:
    metadata:
      labels:
        app: prometheus-relay-server
    spec:
      containers:
      - name: prometheus-relay-server
        image: {{ .Values.registry }}{{ .Values.images.http_relay_server }}
        args:
        - --log_level=4 # WARN
        - --port=8080
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 15
        ports:
        - name: http
          containerPort: 8080
        resources:
          requests:
            memory: "16Mi"
            cpu: "100m"
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      securityContext:
        # Run as the unprivileged uid/gid 65532 (no root in the container).
        runAsNonRoot: true
        runAsUser: 65532
        runAsGroup: 65532
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-relay-server
  labels:
    app.kubernetes.io/name: {{ .Chart.Name }}
    # This is used by the ServiceMonitor.
    app: prometheus-relay-server
spec:
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    app: prometheus-relay-server
  type: ClusterIP
---
# Exposes the relay server to robots under
# https://<domain>/apis/core.prometheus-relay/server/... .
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prometheus-relay-server
  labels:
    app.kubernetes.io/name: {{ .Chart.Name }}
  annotations:
    # Allow large request bodies so big relayed payloads can pass through.
    nginx.ingress.kubernetes.io/client-body-buffer-size: "50m"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    # $2 is the second capture group of the path regex below, i.e. everything
    # after the /apis/core.prometheus-relay/server prefix.
    nginx.ingress.kubernetes.io/rewrite-target: /server/$2
    # Reject requests whose credentials the token vendor cannot verify
    # (robot service accounts are accepted: robots=true).
    nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
  ingressClassName: nginx
  rules:
  - host: {{ .Values.domain }}
    http:
      paths:
      - path: /apis/core.prometheus-relay/server($|/)(.*)
        pathType: Prefix
        backend:
          service:
            name: prometheus-relay-server
            port:
              number: 80
---
# Makes the cloud Prometheus (label prometheus: kube-prometheus) scrape the
# relay server's http port.
# NOTE(review): requests to this port are relayed, so the scrape presumably
# reaches the robot-side backend rather than the relay process itself —
# confirm before changing.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-relay-server
  labels:
    app.kubernetes.io/name: {{ .Chart.Name }}
    prometheus: kube-prometheus
spec:
  endpoints:
  - port: http
    relabelings:
    # Use the node name instead of the pod IP:port as the "instance" label.
    - sourceLabels: [__meta_kubernetes_pod_node_name]
      targetLabel: instance
    {{- if .Values.prometheus.serviceMonitor.metricRelabelings }}
    metricRelabelings: {{- tpl (toYaml .Values.prometheus.serviceMonitor.metricRelabelings | nindent 6) . }}
    {{- end }}
  selector:
    matchLabels:
      app: prometheus-relay-server
================================================
FILE: src/app_charts/prometheus/cloud/storage-class.yaml
================================================
# SSD-backed GCE persistent-disk class. Referenced by the Prometheus
# storageSpec (storageClassName: ssd) in prometheus-cloud.values.yaml.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ssd
  labels:
    app.kubernetes.io/name: {{ .Chart.Name }}
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-ssd
# Allow growing the volume later by editing the PVC's storage request.
allowVolumeExpansion: true
================================================
FILE: src/app_charts/prometheus/prometheus-cloud.values.yaml
================================================
# Configuration for the prometheus-operator chart.
# Reference:
# https://github.com/prometheus-community/helm-charts/blob/kube-prometheus-stack-41.5.1/charts/kube-prometheus-stack/values.yaml
#
# WARNING: the prometheus-operator chart is complicated and error-prone. If you
# edit this file, run the following command to generate the output with `helm
# template`, and verify that your changes have the expected effect.
#
# bazel build src/app_charts/prometheus/prometheus-operator-chart.cloud.yaml
nameOverride: kube
fullnameOverride: kube
kubeTargetVersionOverride: "1.23.8"
# Alertmanagers have to be deployed individually by users.
alertmanager:
enabled: false
defaultRules:
rules:
kubeApiserver: false
prometheus:
prometheusSpec:
# Pick up all service monitors across all namespaces.
serviceMonitorNamespaceSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
serviceMonitorSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
# Pick up all pod monitors across all namespaces.
podMonitorNamespaceSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
podMonitorSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
# Pick up all rules across all namespaces.
ruleNamespaceSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
ruleSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
externalUrl: "${EXTERNAL_URL}"
retention: "${RETENTION_TIME}"
retentionSize: "${RETENTION_SIZE}"
walCompression: true
nodeSelector: "${CR_PROMETHEUS_NODE_SELECTOR_OBJECT}"
tolerations: "${CR_PROMETHEUS_TOLERATIONS_OBJECT}"
resources:
requests:
cpu: "${LIMITS_CPU}"
memory: "${LIMITS_MEMORY}"
limits:
cpu: "${LIMITS_CPU}"
memory: "${LIMITS_MEMORY}"
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: ssd
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "${REQUESTS_STORAGE}"
# Pick up user-created Alertmanager pods with app=alertmanager and a non-empty port.
additionalAlertManagerConfigs:
- kubernetes_sd_configs:
- role: service
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
authorization:
type: Bearer
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_label_app]
regex: kube-alertmanager
action: keep
# Set absurdly high thresholds as a workaround for not being able to disable these and not having enough time to WAL replay
# https://github.com/prometheus-operator/prometheus-operator/issues/3587
containers:
- name: prometheus
readinessProbe:
initialDelaySeconds: 300
failureThreshold: 100000
serviceMonitor:
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
# etcd, scheduler, and controller-manager are managed by GKE and hidden.
kubeEtcd:
enabled: false
kubeControllerManager:
enabled: false
kubeScheduler:
enabled: false
coreDns:
enabled: false
# Throws an invalid namespace "kube-system" error during deployment, as this is
# trying to install resources into the kube-system namespace, which synk does
# not support.
kubeProxy:
enabled: false
prometheusOperator:
admissionWebhooks:
enabled: true
certManager:
enabled: true
issuerRef:
name: "selfsigned-issuer"
kind: "ClusterIssuer"
serviceMonitor:
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
# Default scraping interval is 20s and these metrics result in a large amount of data
kubeApiServer:
serviceMonitor:
interval: 1m
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
kubelet:
serviceMonitor:
# From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
resourcePath: "/metrics/resource"
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
- sourceLabels: [__metrics_path__]
targetLabel: metrics_path
kubeStateMetrics:
serviceMonitor:
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
# Subcharts
nodeExporter:
enabled: true
serviceMonitor:
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
grafana:
env:
GF_SERVER_DOMAIN: "${CR_GF_SERVER_DOMAIN}"
GF_SERVER_ROOT_URL: "${CR_GF_SERVER_ROOT_URL}"
GF_AUTH_ANONYMOUS_ENABLED: "true"
# Load dashboards from configmaps with a given label across all namespaces.
sidecar:
dashboards:
enabled: true
      label: grafana # The label that our own legacy grafana-operator uses.
searchNamespace: ALL
multicluster:
global:
enabled: true
etcd:
enabled: true
grafana.ini:
analytics:
check_for_updates: false
security:
csrf_trusted_origins: "${CR_GF_CSRF_TRUSTED_ORIGINS}"
smtp:
enabled: "${CR_GF_SMTP_ENABLED}"
host: "${CR_GF_SMTP_HOST}"
user: "${CR_GF_SMTP_USER}"
password: "${CR_GF_SMTP_PASSWORD}"
from_address: "${CR_GF_SMTP_FROM_ADDRESS}"
from_name: "${CR_GF_SMTP_FROM_NAME}"
skip_verify: "${CR_GF_SMTP_SKIP_VERIFY}"
serviceMonitor:
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
gf_ingress_auth_url: "${CR_GF_INGRESS_AUTH_URL}"
gf_ingress_auth_signin: "${CR_GF_INGRESS_AUTH_SIGNIN}"
gf_ingress_error_page_403: "${CR_GF_INGRESS_ERROR_PAGE_403}"
prom_ingress_auth_url: "${CR_PROM_INGRESS_AUTH_URL}"
prom_ingress_auth_signin: "${CR_PROM_INGRESS_AUTH_SIGNIN}"
prom_ingress_error_page_403: "${CR_PROM_INGRESS_ERROR_PAGE_403}"
================================================
FILE: src/app_charts/prometheus/prometheus-robot.values.yaml
================================================
# Configuration for the prometheus-operator chart.
# Reference:
# https://github.com/prometheus-community/helm-charts/blob/kube-prometheus-stack-15.4.6/charts/kube-prometheus-stack/values.yaml
#
# WARNING: the prometheus-operator chart is complicated and error-prone. If you
# edit this file, run the following command to generate the output with `helm
# template`, and verify that your changes have the expected effect.
#
# bazel build src/app_charts/prometheus/prometheus-operator-chart.robot.yaml
nameOverride: kube
fullnameOverride: kube
kubeTargetVersionOverride: "1.23.8"
alertmanager:
enabled: false
defaultRules:
create: false
prometheus:
prometheusSpec:
# Pick up all service monitors across all namespaces.
serviceMonitorNamespaceSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
serviceMonitorSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
# Pick up all pod monitors across all namespaces.
podMonitorNamespaceSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
podMonitorSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
# Pick up all rules across all namespaces.
ruleNamespaceSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
ruleSelector:
# Inverse selector selects everything
matchExpressions:
- key: "non-existent-label-for-universal-matching"
operator: "DoesNotExist"
logLevel: warn
    # Historical data is limited to the lower of `retention` and
    # `retentionSize`. The WAL and the max chunk size also count toward
    # `retentionSize`, but Prometheus puts no limit on the WAL size, so it is
    # up to us to make sure we don't have too many metrics and the WAL doesn't
    # grow too big.
retention: "3h"
# If you increase retentionSize, increase sizeLimit below as well, but
# remember that this is RAM, not disk. We don't know how much headroom
# is needed, but if set equal you can still run out of disk space.
retentionSize: 448MB
# Reduce the max chunk size (default=512MB) to reduce the headroom required
# for the in-progress chunk.
# This chunk size correlates with the current retention size. Larger
# retention sizes make it so that WAL is not truncated as it should be.
additionalArgs:
- name: storage.tsdb.max-block-chunk-segment-size
value: 16MB
- name: storage.tsdb.wal-segment-size
value: 16MB
storageSpec:
emptyDir:
medium: Memory
sizeLimit: 512Mi
serviceMonitor:
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
# Throws an invalid namespace "kube-system" error during deployment, as this is
# trying to install resources into the kube-system namespace, which synk does
# not support.
kubeEtcd:
enabled: false
kubeControllerManager:
enabled: false
kubeProxy:
enabled: false
kubeScheduler:
enabled: false
coreDns:
enabled: false
prometheusOperator:
admissionWebhooks:
enabled: true
certManager:
enabled: true
issuerRef:
name: "selfsigned-issuer"
kind: "ClusterIssuer"
serviceMonitor:
metricRelabelings:
# Drop high cardinality kube state metrics
- action: drop
regex: "kube_*"
sourceLabels: [ __name__ ]
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
# Default scraping interval is 20s and these metrics result in a large amount of data
kubeApiServer:
serviceMonitor:
interval: 10m
metricRelabelings:
# Drop high cardinality apiserver metrics.
- action: drop
regex: "apiserver_(request|response|watch|admission).*|etcd_request.*|code_*"
sourceLabels: [__name__]
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
kubelet:
serviceMonitor:
# From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
resourcePath: "/metrics/resource"
metricRelabelings:
# Drop high cardinality metrics from kubelet.
- action: drop
regex: "container_network.*|container_blkio.*|kubelet_.*|kubernetes_feature_enabled|container_fs.*|container_processes|container_last_seen|storage_operation_duration_seconds_bucket"
sourceLabels: [__name__]
cAdvisorMetricRelabelings:
# Drop high cardinality metrics from kubelet. (with metrics_path="/metrics/cadvisor")
- sourceLabels: [__name__]
action: drop
regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
- sourceLabels: [__name__]
action: drop
regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
- sourceLabels: [__name__]
action: drop
regex: 'container_memory_(mapped_file|swap|failures_total)'
- sourceLabels: [__name__]
action: drop
regex: 'container_(file_descriptors|tasks_state|threads_max)'
- sourceLabels: [__name__]
action: drop
regex: 'container_spec.*|container_network.*'
- sourceLabels: [id, pod]
action: drop
regex: '.+;'
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
- sourceLabels: [__metrics_path__]
targetLabel: metrics_path
# Subcharts
nodeExporter:
enabled: true
serviceMonitor:
metricRelabelings:
# Drop high cardinality metrics from node exporter.
- action: drop
regex: "node_cpu_seconds_total"
sourceLabels: [ __name__ ]
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
prometheus-node-exporter:
extraArgs:
# This collector produces log-spam on newer kernels
# https://github.com/prometheus/node_exporter/issues/1892
- --no-collector.rapl
# Since we have hardware network IRQs, this generates 7 zero-value metrics for each CPU core.
- --no-collector.softnet
# This is disabled by default, since it might leak memory
# (https://github.com/prometheus/node_exporter/blob/master/CHANGELOG.md#0160-rc1--2018-04-04)
- --collector.wifi
# Export CPU model (one metric per core)
- --collector.cpu.info
# Ignore more fuse filesystems
# https://github.com/prometheus/node_exporter/blob/master/collector/filesystem_linux.go#L33
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|fuse\.\w*|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
# Ignore filesystems with UIDs in the mount points (high cardinality)
- --collector.filesystem.mount-points-exclude=^(/run/containerd/|/var/lib/kubelet)
# Ignore virtual network devices
- --collector.netdev.device-exclude=^(bond|cilium|ip6tnl0|lo|lxc|tunl)
- --collector.netclass.ignored-devices=^(bond|cilium|ip6tnl0|lo|lxc|tunl)
grafana:
enabled: false
================================================
FILE: src/app_charts/prometheus/robot/hw-exporter.yaml
================================================
# DaemonSet exporting hardware metrics from every robot node. It inspects the
# host through read-only mounts of /proc, /sys and /usr/share (the latter for
# the PCI id database, see below).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hw-exporter
spec:
  selector:
    matchLabels:
      app: hw-exporter
  template:
    metadata:
      labels:
        app: hw-exporter
    spec:
      containers:
      - name: hw-exporter
        image: {{ .Values.registry }}{{ .Values.images.hw_exporter }}
        args:
        - --metrics-port=9100
        # Resolve host paths relative to the mounts below.
        - --chroot=/host
        volumeMounts:
        - mountPath: /host/proc
          name: proc
          readOnly: true
        - mountPath: /host/sys
          name: sys
          readOnly: true
        - mountPath: /host/usr/share
          name: usr-share
          readOnly: true
      securityContext:
        # Run as an unprivileged user; the host mounts are read-only anyway.
        fsGroup: 65534
        runAsGroup: 65534
        runAsNonRoot: true
        runAsUser: 65534
      # Also run on tainted nodes so every node's hardware is covered.
      tolerations:
      - operator: Exists
        effect: NoSchedule
      volumes:
      - hostPath:
          path: /proc
          type: ""
        name: proc
      - hostPath:
          path: /sys
          type: ""
        name: sys
      # Mount pcidb from host, which could be in /usr/share/misc or /usr/share/hwdata.
      - hostPath:
          path: /usr/share
          type: ""
        name: usr-share
---
# Headless service that gives the ServiceMonitor a per-pod scrape endpoint
# on each node.
apiVersion: v1
kind: Service
metadata:
  name: hw-exporter
  labels:
    app: hw-exporter
spec:
  clusterIP: None
  ports:
  - port: 9100
    name: http-metrics
  selector:
    app: hw-exporter
  type: ClusterIP
---
# Scrape every hw-exporter pod once per minute.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: hw-exporter
  labels:
    prometheus: kube-prometheus
spec:
  endpoints:
  - port: http-metrics
    path: /metrics
    interval: 60s
  selector:
    matchLabels:
      app: hw-exporter
================================================
FILE: src/app_charts/prometheus/robot/prometheus-adapter.yaml
================================================
# Generated by ../update_prometheus_adapter.sh from the upstream
# kubernetes-sigs/prometheus-adapter v0.12.0 manifests; re-run that script
# instead of editing this file by hand.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
version: v1beta1
versionPriority: 100
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: resource-metrics:system:auth-delegator
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hpa-controller-custom-metrics
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: custom-metrics-server-resources
subjects:
- kind: ServiceAccount
name: horizontal-pod-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-adapter
subjects:
- kind: ServiceAccount
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: resource-metrics-server-resources
rules:
- apiGroups:
- metrics.k8s.io
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
rules:
- apiGroups:
- ""
resources:
- nodes
- namespaces
- pods
- services
verbs:
- get
- list
- watch
---
apiVersion: v1
data:
config.yaml: |-
"resourceRules":
"cpu":
"containerLabel": "container"
"containerQuery": |
sum by (<<.GroupBy>>) (
irate (
container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="",pod!=""}[4m]
)
)
"nodeQuery": |
sum by (<<.GroupBy>>) (
irate(
node_cpu_usage_seconds_total{<<.LabelMatchers>>}[4m]
)
)
"resources":
"overrides":
"namespace":
"resource": "namespace"
"node":
"resource": "node"
"pod":
"resource": "pod"
"memory":
"containerLabel": "container"
"containerQuery": |
sum by (<<.GroupBy>>) (
container_memory_working_set_bytes{<<.LabelMatchers>>,container!="",pod!=""}
)
"nodeQuery": |
sum by (<<.GroupBy>>) (
node_memory_working_set_bytes{<<.LabelMatchers>>}
)
"resources":
"overrides":
"node":
"resource": "node"
"namespace":
"resource": "namespace"
"pod":
"resource": "pod"
"window": "5m"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: adapter-config
namespace: {{ .Release.Namespace }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
template:
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
spec:
automountServiceAccountToken: true
containers:
- args:
- --cert-dir=/var/run/serving-cert
- --config=/etc/adapter/config.yaml
- --metrics-relist-interval=1m
- --prometheus-url=http://kube-prometheus.{{ .Release.Namespace }}.svc:9090/
- --secure-port=6443
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
image: registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.12.0
livenessProbe:
failureThreshold: 5
httpGet:
path: /livez
port: https
scheme: HTTPS
initialDelaySeconds: 30
periodSeconds: 5
name: prometheus-adapter
ports:
- containerPort: 6443
name: https
readinessProbe:
failureThreshold: 5
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 30
periodSeconds: 5
resources:
requests:
cpu: 102m
memory: 180Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmpfs
readOnly: false
- mountPath: /var/run/serving-cert
name: volume-serving-cert
readOnly: false
- mountPath: /etc/adapter
name: config
readOnly: false
nodeSelector:
kubernetes.io/os: linux
securityContext: {}
serviceAccountName: prometheus-adapter
volumes:
- emptyDir: {}
name: tmpfs
- emptyDir: {}
name: volume-serving-cert
- configMap:
name: adapter-config
name: config
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
spec:
egress:
- {}
ingress:
- {}
podSelector:
matchLabels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
policyTypes:
- Egress
- Ingress
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
spec:
minAvailable: 1
selector:
matchLabels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: resource-metrics-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
automountServiceAccountToken: false
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/version: 0.12.0
name: prometheus-adapter
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: https
port: 443
targetPort: 6443
selector:
app.kubernetes.io/component: metrics-adapter
app.kubernetes.io/name: prometheus-adapter
================================================
FILE: src/app_charts/prometheus/robot/prometheus-operator.yaml
================================================
# This includes all resources expanded from the prometheus-operator chart using
# the values in ../prometheus-robot.values.yaml.
# Some pseudo-variables that were inserted there are replaced with actual runtime values.
{{ .Files.Get "files/prometheus-operator-chart.robot.yaml" | replace "${CLOUD_ROBOTICS_DOMAIN}" .Values.domain | replace "${GCP_PROJECT_ID}" .Values.project | replace "HELM-NAMESPACE" .Release.Namespace }}
================================================
FILE: src/app_charts/prometheus/robot/prometheus-relay-client.yaml
================================================
# Robot-side half of the HTTP relay: opens an outgoing connection to the
# cloud relay server and forwards relayed requests to the local Prometheus.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-relay-client
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-relay-client
  template:
    metadata:
      labels:
        app: prometheus-relay-client
    spec:
      containers:
      - args:
        - --log_level=4 # WARN
        # Local Prometheus that relayed requests are forwarded to (plain HTTP).
        - --backend_address=kube-prometheus.{{ .Release.Namespace }}.svc.cluster.local:9090
        - --backend_scheme=http
        # Cloud endpoint of the relay (cloud/prometheus-relay.yaml serves
        # this prefix on the project domain).
        - --relay_address={{ .Values.domain }}
        - --relay_prefix=/apis/core.prometheus-relay
        # Pass the robot name as the relay server name.
        - --server_name={{ .Values.robot.name }}
        image: {{ .Values.registry }}{{ .Values.images.http_relay_client }}
        name: prometheus-relay-client
        resources:
          requests:
            cpu: 20m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      securityContext:
        # Run as the unprivileged uid/gid 65532 (no root in the container).
        runAsNonRoot: true
        runAsUser: 65532
        runAsGroup: 65532
================================================
FILE: src/app_charts/prometheus/robot/smartctl-exporter.yaml
================================================
# DaemonSet exporting S.M.A.R.T. disk-health metrics from every robot node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: smartctl-exporter
spec:
  selector:
    matchLabels:
      app: smartctl-exporter
  template:
    metadata:
      labels:
        app: smartctl-exporter
    spec:
      containers:
      - name: smartctl-exporter
        # Mirrored from quay.io/prometheuscommunity/smartctl-exporter
        image: gcr.io/cloud-robotics-releases/smartctl-exporter:v0.12.0
        args:
        - --web.listen-address=:9633
        securityContext:
          # smartctl needs raw access to the node's disk devices, so this
          # container runs privileged as root.
          privileged: true
          runAsUser: 0
      # Also run on tainted nodes so every node's disks are covered.
      tolerations:
      - operator: Exists
        effect: NoSchedule
---
# Headless service that gives the ServiceMonitor a per-pod scrape endpoint
# on each node.
apiVersion: v1
kind: Service
metadata:
  name: smartctl-exporter
  labels:
    app: smartctl-exporter
spec:
  clusterIP: None
  ports:
  - port: 9633
    name: http-metrics
  selector:
    app: smartctl-exporter
  type: ClusterIP
---
# Scrape every smartctl-exporter pod once per minute.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: smartctl-exporter
  labels:
    prometheus: kube-prometheus
spec:
  endpoints:
  - port: http-metrics
    path: /metrics
    interval: 60s
  selector:
    matchLabels:
      app: smartctl-exporter
================================================
FILE: src/app_charts/prometheus/update_prometheus_adapter.sh
================================================
#!/bin/bash
# Regenerates robot/prometheus-adapter.yaml from the upstream
# kubernetes-sigs/prometheus-adapter release manifests, adapting them for use
# as a Helm template in this chart.
#
# Fixes over the previous version: abort on any failed step (a failed download
# used to silently produce a broken manifest) and clean up the downloaded
# archive and extracted sources afterwards.
set -o errexit -o nounset -o pipefail

VERSION="0.12.0"
OUT="robot/prometheus-adapter.yaml"

wget https://github.com/kubernetes-sigs/prometheus-adapter/archive/refs/tags/v"${VERSION}".tar.gz
tar xvzf v"${VERSION}".tar.gz

# Concatenate all upstream manifests into one file, inserting a YAML document
# separator ("---") between files.
awk 'FNR==1 && NR>1 {print "---"}{print}' "prometheus-adapter-${VERSION}/deploy/manifests/"*.yaml > "${OUT}"

# A single replica is sufficient on the robot cluster.
sed -i 's#replicas: 2#replicas: 1#g' "${OUT}"
# Deploy into the Helm release namespace instead of the hardcoded "monitoring".
sed -i 's#namespace: monitoring#namespace: {{ .Release.Namespace }}#g' "${OUT}"
# Point the adapter at the in-cluster Prometheus over plain HTTP.
sed -i 's#https://prometheus.monitoring.svc#http://kube-prometheus.{{ .Release.Namespace }}.svc#g' "${OUT}"

# Remove the downloaded archive and the extracted source tree.
rm -rf v"${VERSION}".tar.gz "prometheus-adapter-${VERSION}"
================================================
FILE: src/app_charts/prometheus/values-cloud.yaml
================================================
domain: "example.com"
project: "my-gcp-project"
registry: "gcr.io/my-gcp-project"
robots: []
# The default requests/limits are sufficient for small deployments with a few
# robots. For a large deployment with ~30 robots, you might need ~2CPU and
# ~12Gi mem.
# TODO(rodrigoq): can we reduce this by updating prometheus?
limits:
cpu: "2000m"
memory: "2Gi"
# The default persistent disk size. You need to adjust this depending on your
# fleet size and desired retention time.
#
# To compute the disk space required we used the formula in
# https://devops.stackexchange.com/questions/9298/how-to-calculate-disk-space-required-by-prometheus-v2-2
#
# retention_time_seconds = 90 * 24 * 60 * 60
# ingested_samples_per_second = avg(sum(rate(prometheus_tsdb_head_samples_appended_total[1d]))) = ~7000
# bytes_per_sample = avg(sum(rate(prometheus_tsdb_compaction_chunk_size_bytes_sum[1d]) /
# rate(prometheus_tsdb_compaction_chunk_samples_sum[1d]))) = ~1.0
# needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes_per_sample = ~72G
# Use a larger volume to account for future growth.
requests:
storage: "200Gi"
retention:
time: "90d"
# Keep in sync with the disksize above and keep some headroom to avoid alerts.
# Retention size should be at most 80-85% of total storage to account for WAL, chunks, and filesystem overhead.
size: "160GB"
# MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
prometheus:
prometheusSpec:
nodeSelector: {}
tolerations: []
serviceMonitor:
metricRelabelings: []
# Grafana HTTP configuration
gf_server_domain: "${CLOUD_ROBOTICS_DOMAIN}"
gf_server_root_url: "https://${CLOUD_ROBOTICS_DOMAIN}/grafana"
gf_csrf_trusted_origins: ""
# Grafana Ingress configuration. Does not use the same replace as the values above.
gf_ingress_auth_url: "http://oauth2-proxy.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify"
gf_ingress_auth_signin: "https://{{ .Values.domain }}/oauth2/start?rd=$escaped_request_uri"
gf_ingress_error_page_403: "https://{{ .Values.domain }}/oauth2/start?rd=$escaped_request_uri"
# Prometheus
prom_ingress_auth_url: "http://oauth2-proxy.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify"
prom_ingress_auth_signin: "https://{{ .Values.domain }}/oauth2/start?rd=$escaped_request_uri"
prom_ingress_error_page_403: "https://{{ .Values.domain }}/oauth2/start?rd=$escaped_request_uri"
# Prometheus: Using ${} replacement
prom_external_url: "https://${CLOUD_ROBOTICS_DOMAIN}/prometheus/"
# Grafana SMTP configuration
# Notes: these need to be all string, since we apply them using the template function "replace"
gf_smtp_enabled: "false"
gf_smtp_host: "smtp-host"
gf_smtp_user: "smtp-user"
gf_smtp_password: "smtp-api-key"
gf_smtp_from_address: "from-address@example.com"
gf_smtp_from_name: "from-name"
gf_smtp_skip_verify: "true"
================================================
FILE: src/app_charts/token-vendor/BUILD.bazel
================================================
load("//bazel:app.bzl", "app")
load("//bazel:app_chart.bzl", "app_chart")

# Cloud-side chart for the token vendor: bundles the Grafana dashboard JSON
# and the token-vendor Go container image.
app_chart(
    name = "token-vendor-cloud",
    files = [
        "dashboard.json",
    ],
    images = {
        "token-vendor-go": "//src/go/cmd/token-vendor:token-vendor-image",
    },
)

# App definition combining the charts above (cloud-only; there is no robot
# chart for the token vendor).
app(
    name = "token-vendor",
    charts = [
        ":token-vendor-cloud",
    ],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/app_charts/token-vendor/cloud/dashboard.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
name: tokenvendor-dashboards-json
labels:
grafana: "1"
data:
relay.json: |-
{{ .Files.Get "files/dashboard.json" | indent 4 }}
================================================
FILE: src/app_charts/token-vendor/cloud/ingress.yaml
================================================
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: public-key-access
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.app-token-vendor.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=true"
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: "/apis/core.token-vendor/v1/public-key.read"
pathType: Prefix
backend:
service:
name: token-vendor
port:
name: token-vendor
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: public-key-manager
annotations:
nginx.ingress.kubernetes.io/auth-url: "http://token-vendor.app-token-vendor.svc.cluster.local/apis/core.token-vendor/v1/token.verify?robots=false"
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: "/apis/core.token-vendor/v1/public-key.configure"
pathType: Prefix
backend:
service:
name: token-vendor
port:
name: token-vendor
- path: "/apis/core.token-vendor/v1/public-key.publish"
pathType: Prefix
backend:
service:
name: token-vendor
port:
name: token-vendor
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: token-vendor
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: "/apis/core.token-vendor/v1/token.verify"
pathType: Prefix
backend:
service:
name: token-vendor
port:
name: token-vendor
- path: "/apis/core.token-vendor/v1/jwt.verify"
pathType: Prefix
backend:
service:
name: token-vendor
port:
name: token-vendor
- path: "/apis/core.token-vendor/v1/token.oauth2"
pathType: Prefix
backend:
service:
name: token-vendor
port:
name: token-vendor
---
================================================
FILE: src/app_charts/token-vendor/cloud/service-monitor.yaml
================================================
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: token-vendor
labels:
prometheus: kube-prometheus
spec:
endpoints:
- port: token-vendor
relabelings:
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
selector:
matchLabels:
app: token-vendor
================================================
FILE: src/app_charts/token-vendor/cloud/service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
name: token-vendor
labels:
# This is used by the ServiceMonitor.
app: token-vendor
spec:
ports:
- port: 80
targetPort: 9090
protocol: TCP
name: token-vendor
selector:
app: token-vendor
type: ClusterIP
================================================
FILE: src/app_charts/token-vendor/cloud/token-vendor-policy.yaml
================================================
apiVersion: v1
kind: ServiceAccount
metadata:
name: token-vendor
annotations:
iam.gke.io/gcp-service-account: "token-vendor@{{ .Values.project }}.iam.gserviceaccount.com"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: token-vendor-key-mngmt
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: token-vendor-key-mngmt-binding
subjects:
- kind: ServiceAccount
name: token-vendor
roleRef:
kind: Role
name: token-vendor-key-mngmt
apiGroup: rbac.authorization.k8s.io
================================================
FILE: src/app_charts/token-vendor/cloud/token-vendor.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
name: token-vendor
spec:
replicas: 1
selector:
matchLabels:
app: token-vendor
template:
metadata:
labels:
app: token-vendor
spec:
containers:
- name: token-vendor
image: {{ .Values.registry }}{{ .Values.images.token_vendor_go }}
args:
- --log-level=4 # WARN
- --project={{ .Values.project }}
- --accepted_audience=https://{{ .Values.domain }}/apis/core.token-vendor/v1/token.oauth2
- --service_account=robot-service
# This scope is for token vendor and for access to GCS/GCR.
- --scope=https://www.googleapis.com/auth/cloud-platform
# This scope allows GKE RBAC policy bindings to refer to service accounts by email.
# https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#forbidden_error_for_service_accounts_on_vm_instances
- --scope=https://www.googleapis.com/auth/userinfo.email
{{- if .Values.use_tv_verbose }}
- --verbose
{{- end }}
{{- if eq .Values.deploy_environment "GCP-testing" }}
- --key-store=IN_MEMORY
{{- else }}
- --key-store=KUBERNETES
- --namespace=app-token-vendor
{{- end }}
ports:
- name: token-vendor
containerPort: 9090
livenessProbe:
httpGet:
path: /healthz
port: 9090
initialDelaySeconds: 15
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
tolerations:
- key: "workload-identity"
operator: "Equal"
value: "true"
effect: "NoSchedule"
nodeSelector:
iam.gke.io/gke-metadata-server-enabled: "true"
serviceAccountName: token-vendor
================================================
FILE: src/app_charts/token-vendor/dashboard.json
================================================
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "token-vendor stats",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 46,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "successful token requests",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unitScale": true
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"expr": "rate(tokens_requested{result=\"success\"}[$__rate_interval])",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Successful Requests",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "failed token requests",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unitScale": true
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 1,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"disableTextWrap": false,
"editorMode": "code",
"expr": "rate(tokens_requested{result=\"failed\"}[$__rate_interval])",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Failed Requests",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "successful token verifications",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unitScale": true
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 3,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"expr": "rate(tokens_verified{result=\"success\"}[$__rate_interval])",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Successful Verifications",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"description": "failed token verifications",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unitScale": true
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 4,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"editorMode": "code",
"expr": "rate(tokens_verified{result=\"failed\"}[$__rate_interval])",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"title": "Failed Verifications",
"type": "timeseries"
}
],
"refresh": "",
"schemaVersion": 39,
"tags": [
"cloud-robotics"
],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Token Vendor",
"uid": "ea96ec4a-2292-4c85-a880-846aaf4bc7b6",
"version": 3,
"weekStart": ""
}
================================================
FILE: src/bootstrap/cloud/BUILD.bazel
================================================
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

# Copies the setup-robot image digest into the release so the installer can
# pin the exact image version that was built.
genrule(
    name = "setup-robot-digest",
    srcs = ["//src/go/cmd/setup-robot:setup-robot-image.digest"],
    outs = ["setup-robot.digest"],
    cmd = "cp $(location //src/go/cmd/setup-robot:setup-robot-image.digest) $@",
)

# Built artifacts (app charts and the image digest), packaged under /bazel-bin.
pkg_tar(
    name = "bazel-bin",
    srcs = [
        ":setup-robot-digest",
        "//src/app_charts/base:base-cloud",
        "//src/app_charts/platform-apps:platform-apps-cloud",
    ],
    mode = "644",
    package_dir = "/bazel-bin",
    strip_prefix = "/",
)

# Non-executable support files: the config template and the Terraform sources.
pkg_tar(
    name = "files",
    srcs = [
        "//:config.sh.tmpl",
        "//src/bootstrap/cloud/terraform",
    ],
    mode = "644",
    strip_prefix = "/",
)

# Shell scripts, packaged executable (mode 755).
pkg_tar(
    name = "scripts",
    srcs = [
        "//:deploy.sh",
        "//scripts:common.sh",
        "//scripts:config.sh",
        "//scripts:include-config.sh",
        "//scripts:set-config.sh",
        "//src/bootstrap/robot:setup_robot.sh",
    ],
    mode = "755",
    strip_prefix = "/",
)

# Prebuilt binaries (synk, token-vendor, terraform, helm), placed under /bin.
pkg_tar(
    name = "binary_tools",
    srcs = [
        "//src/go/cmd/synk",
        "//src/go/cmd/token-vendor:token-vendor-app",
        "@hashicorp_terraform//:terraform",
        "@kubernetes_helm//:helm",
    ],
    mode = "755",
    package_dir = "/bin",
)

# The release tarball: everything above under /cloud-robotics-core, plus the
# INSTALL_FROM_BINARY marker file that tells deploy.sh to install this binary
# release instead of building from local sources.
pkg_tar(
    name = "crc-binary",
    srcs = [
        "INSTALL_FROM_BINARY",
    ],
    extension = "tar.gz",
    mode = "644",
    package_dir = "/cloud-robotics-core",
    deps = [
        ":bazel-bin",
        ":binary_tools",
        ":files",
        ":scripts",
    ],
)
================================================
FILE: src/bootstrap/cloud/INSTALL_FROM_BINARY
================================================
# When this marker file is present, the deploy.sh script installs the binary instead of building
# from local sources.
================================================
FILE: src/bootstrap/cloud/run-install.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Downloads a Cloud Robotics binary release tarball and runs the requested
# deploy command from it. Intended to be piped from curl, hence the wrapping
# braces.
{ # this ensures the entire script is downloaded #

set -o pipefail -o errexit

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BUCKET_URI=${BUCKET_URI:-"https://storage.googleapis.com/cloud-robotics-releases"}
GCP_PROJECT_ID="$1"

# The version argument is optional: if $2 is already a --command (or empty),
# default to the "latest" release.
if [[ -z "$2" || "$2" = --* ]]; then
  TARGET="latest"
  COMMAND="$2"
else
  TARGET="$2"
  COMMAND="$3"
fi

if [[ -z "${GCP_PROJECT_ID}" || ! "${COMMAND}" =~ ^(|--set-config|--set-oauth|--delete|--terraform)$ ]]; then
  echo "Usage: $0 PROJECT_ID [VERSION|TARBALL.tar.gz] [COMMAND]"
  echo "Supported commands:"
  echo "  --set-config   Updates the cloud config interactively."
  echo "  --set-oauth    Enables and configures OAuth interactively."
  echo "  --delete       Deletes Cloud Robotics from the cloud project."
  echo "  --terraform    Apply only terraform changes."
  echo "  (default)      Install the specified version."
  exit 1
fi

# Resolve a symbolic version (e.g. "latest") to a concrete tarball name by
# fetching the version file from the bucket.
if [[ ! "${TARGET}" = *.tar.gz ]]; then
  echo "Downloading version file ${BUCKET_URI}/${TARGET}"
  TARGET=$( curl --silent --show-error --fail "${BUCKET_URI}/${TARGET}" )
fi

echo "Downloading tarball ${BUCKET_URI}/${TARGET}"
TMPDIR="$( mktemp -d )"
curl --silent --show-error --fail "${BUCKET_URI}/${TARGET}" | tar xz -C "${TMPDIR}"
# Quote the paths: mktemp/bash paths may contain spaces on some systems.
cd "${TMPDIR}/cloud-robotics-core"

# Pass xtrace on to subshells if set for this shell.
BASH=bash
if [[ $SHELLOPTS =~ xtrace ]] ; then
  BASH="bash -o xtrace"
fi

if [[ "${COMMAND}" = "--set-config" ]]; then
  $BASH scripts/set-config.sh "${GCP_PROJECT_ID}"
elif [[ "${COMMAND}" = "--set-oauth" ]]; then
  $BASH scripts/set-config.sh "${GCP_PROJECT_ID}" --edit-oauth
elif [[ "${COMMAND}" = "--delete" ]]; then
  $BASH ./deploy.sh delete "${GCP_PROJECT_ID}"
else
  # We tag the setup-robot files with this information to be able to check if
  # cloud and robot-installations are in sync
  export TARGET
  $BASH scripts/set-config.sh "${GCP_PROJECT_ID}" --ensure-config
  if [[ "${COMMAND}" = "--terraform" ]]; then
    $BASH ./deploy.sh update_infra "${GCP_PROJECT_ID}"
  else
    $BASH ./deploy.sh create "${GCP_PROJECT_ID}"
  fi
fi

# Return to the original directory and clean up the extracted tarball.
cd "${DIR}"
rm -rf "${TMPDIR}"

} # this ensures the entire script is downloaded #
================================================
FILE: src/bootstrap/cloud/terraform/.gitignore
================================================
.terraform
config.auto.tfvars
terraform.tfvars
backend.tf
terraform.tfstate.*.backup
================================================
FILE: src/bootstrap/cloud/terraform/BUILD.bazel
================================================
# All Terraform sources plus the Endpoints OpenAPI spec (www.yaml), consumed
# by the release tarball (//src/bootstrap/cloud:files). backend.tf is
# excluded; it is generated per-project (it is also git-ignored).
filegroup(
    name = "terraform",
    srcs = [
        "www.yaml",
    ] + glob(
        include = ["*.tf"],
        exclude = ["backend.tf"],
    ),
    visibility = ["//visibility:public"],
)
================================================
FILE: src/bootstrap/cloud/terraform/README.md
================================================
# Terraform
These files can be used to create, update or delete your personal development
or shared projects.
To update your user project, use deploy.sh:
```shell
./deploy.sh update my-project
```
To update shared projects such as the robolab project, run:
```shell
./deploy.sh update the-robolab-project
```
Our Terraform setup is special in two ways:
1. The deploy.sh script passes some variables from the config file in
the named project as input to enable you to switch projects naturally.
Terraform doesn't read the config directly, but rather through a
`src/bootstrap/cloud/terraform/terraform.tfvars` file that is managed by
deploy.sh.
2. The Terraform state is stored on GCS, not in your client. Each project has
its own state file in the GCS bucket. When initializing
("terraform init"), you need to set -backend-config to set the right prefix.
deploy.sh also takes care of that.
To run terraform commands locally, do:
```shell
bazel build @hashicorp_terraform//:terraform
alias terraform=$(realpath bazel-out/../../../external/*hashicorp_terraform/terraform)
cd src/bootstrap/cloud/terraform/
terraform init
```
================================================
FILE: src/bootstrap/cloud/terraform/address.tf
================================================
# Static external IP for the main cloud deployment. Referenced by the DNS
# A record in dns.tf and the Endpoints spec in endpoints.tf.
resource "google_compute_address" "cloud_robotics" {
  name       = "cloud-robotics"
  project    = data.google_project.project.project_id
  region     = var.region
  depends_on = [google_project_service.project-services["compute.googleapis.com"]]
}

# One additional static IP per extra region; used for the per-region DNS
# records (see dns.tf).
resource "google_compute_address" "cloud_robotics_ar" {
  for_each   = var.additional_regions
  name       = format("%s-%s", each.key, "ar-cloud-robotics")
  project    = data.google_project.project.project_id
  region     = each.value.region
  depends_on = [google_project_service.project-services["compute.googleapis.com"]]
}
================================================
FILE: src/bootstrap/cloud/terraform/certificate-authority.tf
================================================
# Certificate related infrastructure.
#
# Configures the necessary Google private CA infrastructure to create in-cluster certificates.
# Resources in this file are only created if the certificate_provider is set to "google-cas".

# CA pool holding the certificate authority defined below.
resource "google_privateca_ca_pool" "ca_pool" {
  project  = data.google_project.project.project_id
  count    = var.certificate_provider == "google-cas" ? 1 : 0
  name     = "${data.google_project.project.project_id}-ca-pool"
  location = var.region
  # Enterprise is recommended for long-lasting certificates
  tier = "ENTERPRISE"
  publishing_options {
    publish_ca_cert = true
    publish_crl     = true
  }
  # Baseline constraints for certificates issued from this pool: leaf-only
  # (is_ca = false) certificates usable for server authentication.
  issuance_policy {
    baseline_values {
      ca_options {
        is_ca = false
      }
      key_usage {
        base_key_usage {
          digital_signature = true
          key_encipherment  = true
        }
        extended_key_usage {
          server_auth = true
        }
      }
    }
  }
}

# Self-signed certificate authority inside the pool above.
resource "google_privateca_certificate_authority" "ca" {
  project                  = data.google_project.project.project_id
  count                    = var.certificate_provider == "google-cas" ? 1 : 0
  certificate_authority_id = "${data.google_project.project.project_id}-certificate-authority"
  location                 = var.region
  pool                     = google_privateca_ca_pool.ca_pool[0].name
  config {
    subject_config {
      subject {
        organization        = var.certificate_subject_organization
        common_name         = var.certificate_subject_common_name
        organizational_unit = var.certificate_subject_organizational_unit
      }
    }
    x509_config {
      ca_options {
        is_ca                  = true
        max_issuer_path_length = 10
      }
      key_usage {
        base_key_usage {
          cert_sign         = true
          crl_sign          = true
          key_encipherment  = true
          digital_signature = true
        }
        extended_key_usage {
          server_auth = true
          client_auth = true
        }
      }
    }
  }
  type = "SELF_SIGNED"
  key_spec {
    algorithm = "EC_P384_SHA384"
  }
}
================================================
FILE: src/bootstrap/cloud/terraform/cluster.tf
================================================
# GKE Cluster
#
# This creates a GKE cluster with Workload Identity enabled and a suitable
# service account for the nodes. This service account cannot be used by the
# workloads: see workload-identity.tf for those service accounts.
# Main GKE cluster, with Workload Identity and shielded nodes enabled. Node
# pools are managed separately (see google_container_node_pool below).
resource "google_container_cluster" "cloud-robotics" {
  project               = data.google_project.project.project_id
  name                  = "cloud-robotics"
  location              = var.cluster_type == "zonal" ? var.zone : var.region
  enable_shielded_nodes = true
  depends_on            = [google_project_service.project-services["container.googleapis.com"]]

  # TODO(ensonic): this is temporary for the zonal -> regional switch
  deletion_protection = false

  # Make the cluster VPC-native (default for v1.21+)
  networking_mode   = "VPC_NATIVE"
  datapath_provider = var.datapath_provider
  # This enables setting network policies using fully qualified domain names.
  # See https://cloud.google.com/kubernetes-engine/docs/how-to/fqdn-network-policies#create-fqdn-network-policy
  enable_fqdn_network_policy = var.datapath_provider == "ADVANCED_DATAPATH" ? true : false

  # We can't create a cluster with no node pool defined, but we want to only use
  # separately managed node pools. So we create the smallest possible default
  # node pool and immediately delete it.
  remove_default_node_pool = true
  initial_node_count       = 1

  addons_config {
    gcs_fuse_csi_driver_config {
      enabled = true
    }
    gke_backup_agent_config {
      enabled = true
    }
  }
  gateway_api_config {
    channel = "CHANNEL_STANDARD"
  }
  ip_allocation_policy {}
  maintenance_policy {
    recurring_window {
      # Dates specify the first occurrence, times are in UTC
      # Start late and end early to cover regions ahead/behind the sun
      # Note: we must not make this too small, otherwise GKE cannot schedule updates
      start_time = "2025-04-05T05:00:00Z"
      end_time   = "2025-04-06T19:00:00Z"
      recurrence = "FREQ=WEEKLY;BYDAY=SA"
    }
  }
  release_channel {
    channel = "STABLE"
  }
  secret_manager_config {
    enabled = var.secret_manager_plugin
  }
  timeouts {
    create = "1h"
    update = "1h"
    delete = "1h"
  }
  vertical_pod_autoscaling {
    enabled = true
  }
  workload_identity_config {
    workload_pool = "${data.google_project.project.project_id}.svc.id.goog"
  }
}
# One GKE cluster per additional region (mirrors "cloud-robotics" above, but
# without the datapath/addons/maintenance extras).
resource "google_container_cluster" "cloud-robotics-ar" {
  for_each              = var.additional_regions
  project               = data.google_project.project.project_id
  name                  = format("%s-%s", each.key, "ar-cloud-robotics")
  location              = var.cluster_type == "zonal" ? each.value.zone : each.value.region
  enable_shielded_nodes = true
  depends_on            = [google_project_service.project-services["container.googleapis.com"]]

  # TODO(ensonic): this is temporary for the zonal -> regional switch
  deletion_protection = false

  # Make the cluster VPC-native (default for v1.21+)
  networking_mode = "VPC_NATIVE"
  ip_allocation_policy {}

  # We can't create a cluster with no node pool defined, but we want to only use
  # separately managed node pools. So we create the smallest possible default
  # node pool and immediately delete it.
  remove_default_node_pool = true
  initial_node_count       = 1

  timeouts {
    create = "1h"
    update = "1h"
    delete = "1h"
  }
  vertical_pod_autoscaling {
    enabled = true
  }
  workload_identity_config {
    workload_pool = "${data.google_project.project.project_id}.svc.id.goog"
  }
}

# This node pool uses Workload Identity and a non-default service account, as
# recommended for improved security.
resource "google_container_node_pool" "cloud_robotics_base_pool" {
  project            = data.google_project.project.project_id
  name               = "base-pool"
  location           = var.cluster_type == "zonal" ? var.zone : var.region
  cluster            = google_container_cluster.cloud-robotics.name
  initial_node_count = 2

  autoscaling {
    min_node_count = 1
    max_node_count = 16
  }
  node_config {
    machine_type = "e2-standard-4"
    # The GKE Metadata Server enables Workload Identity.
    workload_metadata_config {
      mode = "GKE_METADATA"
    }
    service_account = google_service_account.gke_node.email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
      "https://www.googleapis.com/auth/userinfo.email",
    ]
  }
}

# Node pool for each additional-region cluster (same shape as base-pool above,
# with a lower autoscaling ceiling).
resource "google_container_node_pool" "cloud_robotics_base_pool_ar" {
  for_each           = var.additional_regions
  project            = data.google_project.project.project_id
  name               = format("%s-%s", "base-pool-ar", each.key)
  location           = var.cluster_type == "zonal" ? each.value.zone : each.value.region
  cluster            = google_container_cluster.cloud-robotics-ar[each.key].name
  initial_node_count = 2

  autoscaling {
    min_node_count = 1
    max_node_count = 10
  }
  node_config {
    machine_type = "e2-standard-4"
    # The GKE Metadata Server enables Workload Identity.
    workload_metadata_config {
      mode = "GKE_METADATA"
    }
    service_account = google_service_account.gke_node.email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform",
      "https://www.googleapis.com/auth/userinfo.email",
    ]
  }
}

# These bindings are based on:
# https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#use_least_privilege_sa
resource "google_service_account" "gke_node" {
  account_id   = "gke-node"
  display_name = "gke-node"
}

resource "google_project_iam_member" "gke_node_roles" {
  project = data.google_project.project.project_id
  member  = google_service_account.gke_node.member
  for_each = toset([
    # GKE recommendations
    "roles/logging.logWriter",
    "roles/monitoring.metricWriter",
    "roles/stackdriver.resourceMetadata.writer",
  ])
  role = each.key
}
================================================
FILE: src/bootstrap/cloud/terraform/dns.tf
================================================
# Public DNS zone for the configured domain (only created if var.domain is
# set).
resource "google_dns_managed_zone" "external-dns" {
  name     = "external-dns"
  dns_name = "${var.domain}."
  count    = var.domain == "" ? 0 : 1
  # This used to be true but no longer is; we keep the description so that
  # Terraform doesn't delete and recreate the zone.
  description = "Automatically managed zone by kubernetes.io/external-dns"
  depends_on  = [google_project_service.project-services["dns.googleapis.com"]]
}
# A record mapping the bare domain to the main static IP (address.tf).
resource "google_dns_record_set" "www-entry" {
  name         = "${var.domain}."
  count        = var.domain == "" ? 0 : 1
  type         = "A"
  ttl          = 300
  rrdatas      = [google_compute_address.cloud_robotics.address]
  managed_zone = google_dns_managed_zone.external-dns[0].name
  project      = google_dns_managed_zone.external-dns[0].project
}

# This is a record, for example, for europe-west1.subdomain.example.com.
# It's used to serve requests in a region closer to the user.
resource "google_dns_record_set" "www-entry-ar" {
  for_each     = var.domain == "" ? {} : var.additional_regions
  name         = "${each.key}.${var.domain}."
  type         = "A"
  ttl          = 300
  rrdatas      = [google_compute_address.cloud_robotics_ar[each.key].address]
  managed_zone = google_dns_managed_zone.external-dns[0].name
  project      = google_dns_managed_zone.external-dns[0].project
}

# Allow cert-manager to solve DNS01 challenges in this zone.
data "google_iam_policy" "external-dns" {
  binding {
    role = "roles/dns.admin"
    members = [
      google_service_account.cert_manager.member
    ]
  }
}

# Attach the dns.admin policy above to the zone (only when a domain is set).
resource "google_dns_managed_zone_iam_policy" "external-dns" {
  count        = var.domain == "" ? 0 : 1
  project      = google_dns_managed_zone.external-dns[0].project
  managed_zone = google_dns_managed_zone.external-dns[0].name
  policy_data  = data.google_iam_policy.external-dns.policy_data
}
================================================
FILE: src/bootstrap/cloud/terraform/endpoints.tf
================================================
# Do not use count to create these 2 conditionally. A deleted service needs to be manually undeleted
# within 30 days of deletion if one wishes to use it again.

# Cloud Endpoints service for www.endpoints.<project>.cloud.goog, rendered from
# the www.yaml OpenAPI template with the project id and the ingress address.
resource "google_endpoints_service" "www_service" {
  service_name = "www.endpoints.${var.id}.cloud.goog"
  project      = var.id
  openapi_config = templatefile(
    "www.yaml",
    {
      GCP_PROJECT_ID = var.id
      INGRESS_IP     = google_compute_address.cloud_robotics.address
    }
  )
}
================================================
FILE: src/bootstrap/cloud/terraform/gcs.tf
================================================
# Regional bucket holding the robot-setup artifacts below; only created when
# on-prem federation is enabled.
resource "google_storage_bucket" "robot" {
  name                        = "${var.id}-robot"
  location                    = var.region
  storage_class               = "REGIONAL"
  uniform_bucket_level_access = "true"
  force_destroy               = "true"
  depends_on                  = [google_project_service.project-services["storage-component.googleapis.com"]]
  count                       = var.onprem_federation ? 1 : 0
}

# Image reference fetched by setup_robot.sh to locate the setup-robot image.
resource "google_storage_bucket_object" "setup_robot_image_reference" {
  name          = "setup_robot_image_reference.txt"
  content       = var.robot_image_reference
  bucket        = google_storage_bucket.robot[0].name
  cache_control = "private, max-age=0, no-transform"
  count         = var.onprem_federation ? 1 : 0
}

# Version tag fetched by setup_robot.sh to align cloud and robot versions.
resource "google_storage_bucket_object" "setup_robot_crc_version" {
  name          = "setup_robot_crc_version.txt"
  content       = var.crc_version
  bucket        = google_storage_bucket.robot[0].name
  cache_control = "private, max-age=0, no-transform"
  count         = var.onprem_federation ? 1 : 0
}

# The setup script itself, served to robots from GCS.
resource "google_storage_bucket_object" "setup_robot" {
  name          = "setup_robot.sh"
  source        = "../../robot/setup_robot.sh"
  bucket        = google_storage_bucket.robot[0].name
  cache_control = "private, max-age=0, no-transform"
  count         = var.onprem_federation ? 1 : 0
}

# We did not always create the bucket here, but sometimes elsewhere. Import it
# to consistently manage it from here now.
import {
  to = google_storage_bucket.config_store
  id = "${var.id}-cloud-robotics-config"
}

resource "google_storage_bucket" "config_store" {
  name                        = "${var.id}-cloud-robotics-config"
  location                    = "US"
  storage_class               = "STANDARD"
  force_destroy               = true
  uniform_bucket_level_access = true
}

resource "google_storage_bucket_object" "config_store_crc_version" {
  name          = "crc_version.txt"
  content       = var.crc_version
  bucket        = google_storage_bucket.config_store.name
  cache_control = "private, max-age=0, no-transform"
}
================================================
FILE: src/bootstrap/cloud/terraform/input.tf
================================================
# Input variables for the cloud bootstrap Terraform module.

variable "name" {
  description = "Deployment's human-readable name (my-project)"
}
variable "id" {
  description = "Deployment's project id (my-project)"
}
variable "domain" {
  description = "Deployment domain (www.example.com)"
}
variable "zone" {
  description = "Cloud zone to deploy to (europe-west1-c)"
}
variable "region" {
  description = "Cloud region to deploy to (europe-west1)"
}
variable "additional_regions" {
  description = "Cloud regions to deploy additional relays to"
  type        = map(any)
  default     = {}
}
variable "shared_owner_group" {
  description = "Name of a group to be added as a owner. Leave empty to not use group sharing."
  default     = ""
}
variable "robot_image_reference" {
  description = "Reference to the Docker image installed by the setup-robot script"
}
variable "crc_version" {
  description = "cloudrobotics-core version tag stored with the setup-robot script to align cloud and robot versions."
}
variable "private_image_repositories" {
  description = "Projects with private GCR image repositories where we need to add IAM access rules."
  type        = list(any)
  default     = []
}
variable "certificate_provider" {
  description = "Certificate provider to use to generate certificates for in-cluster services. Should be one of: lets-encrypt, google-cas."
  type        = string
}
variable "certificate_subject_common_name" {
  description = "Certificate Common Name (CN) field"
  type        = string
}
variable "certificate_subject_organization" {
  description = "Certificate Subject Organization (O) field"
  type        = string
}
variable "certificate_subject_organizational_unit" {
  description = "Certificate Subject Organizational Unit (OU) field"
  type        = string
  default     = null
}
variable "cluster_type" {
  description = "GKE cluster type. Must be one of {zonal,regional}."
  type        = string
  default     = "zonal"
  validation {
    condition     = contains(["zonal", "regional"], var.cluster_type)
    error_message = "Must be either \"zonal\" or \"regional\"."
  }
}
variable "datapath_provider" {
  description = "Whether to use Dataplane v1 or v2 (DATAPATH_PROVIDER_UNSPECIFIED or ADVANCED_DATAPATH)."
  type        = string
  default     = "DATAPATH_PROVIDER_UNSPECIFIED"
  validation {
    condition     = contains(["DATAPATH_PROVIDER_UNSPECIFIED", "ADVANCED_DATAPATH"], var.datapath_provider)
    error_message = "Must be either \"DATAPATH_PROVIDER_UNSPECIFIED\" or \"ADVANCED_DATAPATH\"."
  }
}
variable "onprem_federation" {
  description = "Enable google cloud robotics layer 1"
  type        = bool
  default     = true
}
variable "secret_manager_plugin" {
  description = "Enable GKE secret manager integration with GKE"
  type        = bool
  default     = false
}
================================================
FILE: src/bootstrap/cloud/terraform/logging.tf
================================================
# New log bucket with short TTL for access logs
resource "google_logging_project_bucket_config" "remote_access_bucket" {
  project        = data.google_project.project.project_id
  location       = "global"
  retention_days = 2
  bucket_id      = "RemoteAccess"
}

# Log all access logs to this dedicated bucket
# (nginx-ingress-controller and oauth2-proxy containers of the cloud-robotics
# cluster; same filter as the exclusion below).
resource "google_logging_project_sink" "remote_access_sink" {
  name                   = "remote-access-sink"
  destination            = "logging.googleapis.com/projects/${var.id}/locations/global/buckets/RemoteAccess"
  filter                 = "resource.type=\"k8s_container\" resource.labels.cluster_name=\"cloud-robotics\" (resource.labels.container_name=\"nginx-ingress-controller\" OR resource.labels.container_name=\"oauth2-proxy\")"
  unique_writer_identity = true
}

# Don't store access logs in "_Default" bucket anymore
resource "google_logging_project_exclusion" "remote_access_exclusion" {
  name   = "remote-access-exclusion"
  filter = "resource.type=\"k8s_container\" resource.labels.cluster_name=\"cloud-robotics\" (resource.labels.container_name=\"nginx-ingress-controller\" OR resource.labels.container_name=\"oauth2-proxy\")"
}
================================================
FILE: src/bootstrap/cloud/terraform/multi-cluster-ingress.tf
================================================
# Multi-Cluster Ingress
#
# If the config calls for additional regions, this registers them all into one
# "fleet" (aka GKE Hub). It enables multi-cluster services and multi-cluster
# ingress, so that the primary "cloud-robotics" cluster is the source-of-truth
# for gateway configuration.
resource "google_gke_hub_feature" "multi_cluster_service_discovery" {
  count      = length(var.additional_regions) > 0 ? 1 : 0
  name       = "multiclusterservicediscovery"
  location   = "global"
  project    = data.google_project.project.project_id
  depends_on = [google_project_service.project-services["gkehub.googleapis.com"]]
}

resource "google_gke_hub_feature" "multi_cluster_ingress" {
  count    = length(var.additional_regions) > 0 ? 1 : 0
  name     = "multiclusteringress"
  location = "global"
  project  = data.google_project.project.project_id
  spec {
    multiclusteringress {
      config_membership = google_gke_hub_membership.cloud_robotics[0].id
    }
  }
  depends_on = [google_project_service.project-services["gkehub.googleapis.com"]]
}

# The GKE cluster called "cloud-robotics" is the primary cluster and the source
# for Gateway configs.
#
# Both of these memberships set location = <the cluster's region>. I don't know
# if this is important or if it's just Anthos metadata, but this is what
# `gcloud container fleet memberships register` does (as opposed to what its
# docs say, which is that it uses the location rather than the region of the
# cluster, which could be a zone).
resource "google_gke_hub_membership" "cloud_robotics" {
  count         = length(var.additional_regions) > 0 ? 1 : 0
  membership_id = "cloud-robotics"
  project       = data.google_project.project.project_id
  location      = var.region
  endpoint {
    gke_cluster {
      resource_link = google_container_cluster.cloud-robotics.id
    }
  }
  depends_on = [google_project_service.project-services["gkehub.googleapis.com"]]
}

# Fleet membership for each additional-region cluster.
resource "google_gke_hub_membership" "cloud_robotics_ar" {
  for_each      = var.additional_regions
  project       = data.google_project.project.project_id
  membership_id = format("%s-%s", each.key, "ar-cloud-robotics")
  location      = each.value.region
  endpoint {
    gke_cluster {
      resource_link = google_container_cluster.cloud-robotics-ar[each.key].id
    }
  }
  depends_on = [google_project_service.project-services["gkehub.googleapis.com"]]
}

# The following IAM policies are required by the Gateway API controllers.
resource "google_project_iam_member" "multi_cluster_service_importer_network_viewer" {
  count      = length(var.additional_regions) > 0 ? 1 : 0
  project    = data.google_project.project.project_id
  role       = "roles/compute.networkViewer"
  member     = "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[gke-mcs/gke-mcs-importer]"
  depends_on = [google_container_cluster.cloud-robotics]
}

resource "google_project_iam_member" "multi_cluster_ingress_controller_container_admin" {
  count      = length(var.additional_regions) > 0 ? 1 : 0
  project    = data.google_project.project.project_id
  role       = "roles/container.admin"
  member     = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-multiclusteringress.iam.gserviceaccount.com"
  depends_on = [google_project_service.project-services["multiclusteringress.googleapis.com"]]
}
================================================
FILE: src/bootstrap/cloud/terraform/output.tf
================================================
# IP address of the main ingress (google_compute_address.cloud_robotics).
output "ingress-ip" {
  value = google_compute_address.cloud_robotics.address
}

# Additional-region ingress addresses, keyed by address name.
output "ingress-ip-ar" {
  value = {
    for address in google_compute_address.cloud_robotics_ar : address.name => address.address
  }
}

# Location (zone or region) of the primary "cloud-robotics" GKE cluster.
output "cluster-location" {
  value = google_container_cluster.cloud-robotics.location
}
================================================
FILE: src/bootstrap/cloud/terraform/project.tf
================================================
# We've stopped managing Google Cloud projects in this Terraform, make sure they
# aren't deleted.
removed {
  from = google_project.project
  lifecycle {
    destroy = false
  }
}

# Look up the existing project by id; referenced throughout the module.
data "google_project" "project" {
  project_id = var.id
}

# Optionally grant a shared owner group editor access to the project.
resource "google_project_iam_member" "owner_group" {
  count   = var.shared_owner_group == "" ? 0 : 1
  project = data.google_project.project.project_id
  role    = "roles/editor"
  member  = "group:${var.shared_owner_group}"
}

# We can't use google_project_services because Endpoints adds services
# dynamically.
resource "google_project_service" "project-services" {
  project            = data.google_project.project.project_id
  disable_on_destroy = false
  for_each = toset(concat([
    "artifactregistry.googleapis.com",
    # Next 2 are needed for Terraform's data.google_project.project.resource to work.
    "cloudbilling.googleapis.com",
    "cloudresourcemanager.googleapis.com",
    "compute.googleapis.com",
    "container.googleapis.com",
    "containersecurity.googleapis.com",
    "dns.googleapis.com",
    "endpoints.googleapis.com",
    "iam.googleapis.com",
    "iamcredentials.googleapis.com",
    "logging.googleapis.com",
    "servicecontrol.googleapis.com",
    # Next 2 are needed for Terraform again
    "servicemanagement.googleapis.com",
    "serviceusage.googleapis.com",
    "storage-component.googleapis.com",
  ], length(var.additional_regions) == 0 ? [] : [
    # Following APIs are only needed when using multi-cluster gateways.
    "gkeconnect.googleapis.com",
    "gkehub.googleapis.com",
    "trafficdirector.googleapis.com",
    "multiclusterservicediscovery.googleapis.com",
    "multiclusteringress.googleapis.com",
  ]))
  service = each.value
}

# This is needed to allow creating certificates in GCP.
resource "google_project_service" "certificateauthority" {
  project = data.google_project.project.project_id
  # Only enable if Google CAS is the Certificate Authority
  count              = var.certificate_provider == "google-cas" ? 1 : 0
  service            = "privateca.googleapis.com"
  disable_on_destroy = false
}
================================================
FILE: src/bootstrap/cloud/terraform/provider.tf
================================================
# Default Google provider: operate on the deployment's project and region.
provider "google" {
  project = var.id
  region  = var.region
}
================================================
FILE: src/bootstrap/cloud/terraform/registry.tf
================================================
# Container registry configuration
locals {
  # NOTE(review): "service_acounts" is misspelled ("service_accounts"). It is a
  # module-scoped local, so renaming would require checking every *.tf file in
  # the module; kept as-is.
  service_acounts = flatten([
    google_service_account.gke_node.member,
    google_service_account.human-acl.member,
    var.onprem_federation ? [google_service_account.robot-service[0].member] : [],
  ])
  # TODO: use the regional repos depending on settings in the future
  # Cross product of service accounts and private-image projects that need
  # reader access.
  private_repo_access = flatten([
    for sa in local.service_acounts : [
      for prj in var.private_image_repositories : {
        prj = prj
        sa  = sa
      }
    ]
  ])
  # gcr.io-style Artifact Registry repositories and their locations.
  std_repositories = [
    { repository = "asia.gcr.io", location = "asia" },
    { repository = "eu.gcr.io", location = "europe" },
    { repository = "gcr.io", location = "us" },
    { repository = "us.gcr.io", location = "us" },
  ]
}

# import existing repos, see: gcloud artifacts repositories list --project=<project-id>
# uncomment if updating an old project, we can't leave it on by default due to:
# https://github.com/hashicorp/terraform/issues/33633
#import {
#  id = "projects/${data.google_project.project.project_id}/locations/asia/repositories/asia.gcr.io"
#  to = google_artifact_registry_repository.gcrio_repositories[0]
#}
#import {
#  id = "projects/${data.google_project.project.project_id}/locations/europe/repositories/eu.gcr.io"
#  to = google_artifact_registry_repository.gcrio_repositories[1]
#}
#import {
#  id = "projects/${data.google_project.project.project_id}/locations/us/repositories/gcr.io"
#  to = google_artifact_registry_repository.gcrio_repositories[2]
#}
#import {
#  id = "projects/${data.google_project.project.project_id}/locations/us/repositories/us.gcr.io"
#  to = google_artifact_registry_repository.gcrio_repositories[3]
#}

# One docker-format repository per entry in std_repositories, with a cleanup
# policy that deletes untagged images after 30 days.
resource "google_artifact_registry_repository" "gcrio_repositories" {
  project                = data.google_project.project.project_id
  location               = local.std_repositories[count.index].location
  repository_id          = local.std_repositories[count.index].repository
  format                 = "docker"
  cleanup_policy_dry_run = false
  cleanup_policies {
    id     = "delete-untagged"
    action = "DELETE"
    condition {
      tag_state  = "UNTAGGED"
      older_than = "30d"
    }
  }
  count = length(local.std_repositories)
}

# Reader access on this project's gcr.io repo for each service account.
resource "google_artifact_registry_repository_iam_member" "gcrio_gar_reader" {
  project    = data.google_project.project.project_id
  location   = "us"
  repository = "gcr.io"
  role       = "roles/artifactregistry.reader"
  count      = length(local.service_acounts)
  member     = local.service_acounts[count.index]
  depends_on = [google_artifact_registry_repository.gcrio_repositories]
}

# Reader access on the gcr.io repos of configured private image projects.
resource "google_artifact_registry_repository_iam_member" "private_gcrio_gar_reader" {
  location   = "us"
  repository = "gcr.io"
  role       = "roles/artifactregistry.reader"
  count      = length(local.private_repo_access)
  project    = local.private_repo_access[count.index].prj
  member     = local.private_repo_access[count.index].sa
  depends_on = [google_artifact_registry_repository.gcrio_repositories]
}
================================================
FILE: src/bootstrap/cloud/terraform/service-account.tf
================================================
# Configuration for the following service accounts:
#
# - robot-service@, which is used by workloads on robot clusters to access GCP
#   APIs, as well as services like the k8s-relay through ingress-nginx.
# - human-acl@, which is used as a "virtual permission" for users of the
#   cluster, allowing access to services like Grafana through ingress-nginx.
#   It can also be used to generate tokens for registering new clusters to the
#   cloud.
resource "google_service_account" "robot-service" {
  account_id   = "robot-service"
  display_name = "robot-service"
  project      = data.google_project.project.project_id
  count        = var.onprem_federation ? 1 : 0
}

# Allow the token-vendor to impersonate the "robot-service" service account
# and to create new tokens for it.
data "google_iam_policy" "robot-service" {
  binding {
    # Security note from b/120897889: This permission allows privilege escalation
    # if granted too widely. Make sure that the robot-service can't mint tokens
    # for accounts other than itself! If in doubt, review this section carefully.
    # In particular, this serves as the default service account for all containers
    # running in the GKE cluster
    role = "roles/iam.serviceAccountTokenCreator"
    members = [
      google_service_account.token_vendor.member,
    ]
  }
  binding {
    role = "roles/iam.serviceAccountUser"
    members = [
      google_service_account.token_vendor.member,
      # This seemingly nonsensical binding is necessary for the robot auth
      # path in the K8s relay, which has to work with GCP auth tokens.
      google_service_account.robot-service[0].member,
    ]
  }
  count = var.onprem_federation ? 1 : 0
}

# Bind policy to the "robot-service" service account.
# More: https://cloud.google.com/iam/docs/service-accounts#service_account_permissions
resource "google_service_account_iam_policy" "robot-service" {
  service_account_id = google_service_account.robot-service[0].name
  policy_data        = data.google_iam_policy.robot-service[0].policy_data
  count              = var.onprem_federation ? 1 : 0
}

# Project-level roles for robot workloads; the set is empty (so no instances
# are created) when on-prem federation is disabled.
resource "google_project_iam_member" "robot-service-roles" {
  project = data.google_project.project.project_id
  member  = google_service_account.robot-service[0].member
  for_each = var.onprem_federation ? toset([
    "roles/cloudtrace.agent",        # Upload cloud traces
    "roles/container.clusterViewer", # Sync CRs from the GKE cluster.
    "roles/logging.logWriter",       # Upload text logs to Cloud logging
    # Required to use robot-service@ for GKE clusters that simulate robots
    "roles/monitoring.viewer",
  ]) : toset([])
  role = each.value
}

resource "google_service_account" "human-acl" {
  account_id   = "human-acl"
  display_name = "human-acl"
  project      = data.google_project.project.project_id
}

# Let the shared owner group "act as" human-acl@.
resource "google_service_account_iam_member" "human-acl-shared-owner-account-user" {
  count              = var.shared_owner_group == "" ? 0 : 1
  service_account_id = google_service_account.human-acl.name
  role               = "roles/iam.serviceAccountUser"
  member             = "group:${var.shared_owner_group}"
}

###
# The following permissions make human-acl@ tokens work with setup_robot.sh.
# To create such tokens, the user needs roles/iam.serviceAccountTokenCreator.
# https://cloud.google.com/iam/docs/create-short-lived-credentials-direct
#
# There is also an RBAC policy to create Robot CRs, defined in
# src/app_charts/base/cloud/registry-policy.yaml.
###
# Allow reading GCS objects such as setup_robot_crc_version.txt.
resource "google_project_iam_member" "human-acl-object-viewer" {
  project = data.google_project.project.project_id
  role    = "roles/storage.objectViewer"
  member  = google_service_account.human-acl.member
}

# Allow robot registration with the token vendor, which checks if the client's
# token can "act as" the human-acl@ SA. We need this binding even if the
# client provided a token for the human-acl@ SA itself.
resource "google_service_account_iam_member" "human-acl-act-as-self" {
  service_account_id = google_service_account.human-acl.name
  role               = "roles/iam.serviceAccountUser"
  member             = google_service_account.human-acl.member
}

# Grant permissions to generate tokens for registering new workcell clusters.
# This lets users run:
#   gcloud auth print-access-token \
#     --impersonate-service-account=human-acl@${PROJECT_ID}.iam.gserviceaccount.com
# so they can register new workcell clusters without passing their own tokens
# (which aren't limited to a single GCP project) into the cluster.
resource "google_service_account_iam_member" "human-acl-token-generator" {
  service_account_id = google_service_account.human-acl.name
  role               = "roles/iam.serviceAccountTokenCreator"
  member             = "group:${var.shared_owner_group}"
  count              = var.shared_owner_group == "" ? 0 : 1
}
================================================
FILE: src/bootstrap/cloud/terraform/versions.tf
================================================
# Terraform version and provider requirements for this module.
terraform {
  required_providers {
    google = {
      source = "hashicorp/google"
    }
  }
  required_version = ">= 1.11.4"
}
================================================
FILE: src/bootstrap/cloud/terraform/workload-identity.tf
================================================
# IAM Configuration for GKE cluster workloads using Workload Identity.
# See also cluster.tf.

# token-vendor
##############
resource "google_service_account" "token_vendor" {
  account_id   = "token-vendor"
  display_name = "token-vendor"
  project      = data.google_project.project.project_id
}

# Allow the app-token-vendor/token-vendor Kubernetes SA to use this GCP SA.
resource "google_service_account_iam_policy" "token_vendor" {
  service_account_id = google_service_account.token_vendor.name
  policy_data        = data.google_iam_policy.token_vendor.policy_data
  # Avoid Error 400: Identity Pool does not exist (my-project.svc.id.goog).
  depends_on = [google_container_cluster.cloud-robotics]
}

# Workload Identity binding for the app-token-vendor/token-vendor Kubernetes SA.
data "google_iam_policy" "token_vendor" {
  binding {
    role = "roles/iam.workloadIdentityUser"
    members = [
      "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[app-token-vendor/token-vendor]"
    ]
  }
}

# Note: the policy in service-account.tf allows the token-vendor to create
# new tokens for the robot-service@ service account.

# cert-manager
##############
resource "google_service_account" "cert_manager" {
  account_id   = "cert-manager"
  display_name = "cert-manager"
  project      = data.google_project.project.project_id
}

# Workload Identity binding for the default/cert-manager Kubernetes SA.
data "google_iam_policy" "cert_manager" {
  binding {
    role = "roles/iam.workloadIdentityUser"
    members = [
      "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[default/cert-manager]",
    ]
  }
}

resource "google_service_account_iam_policy" "cert_manager" {
  service_account_id = google_service_account.cert_manager.id
  policy_data        = data.google_iam_policy.cert_manager.policy_data
  depends_on         = [google_container_cluster.cloud-robotics]
}

# Instead of granting dns.admin on the whole project as recommended, grant
# reader on the project (needed to list managed zones) and admin on the
# individual zones that cert-manager uses.
resource "google_project_iam_member" "cert_manager_dns_reader" {
  project = data.google_project.project.project_id
  role    = "roles/dns.reader"
  member  = google_service_account.cert_manager.member
}

# cert-manager-google-cas-issuer
################################
###
# The following resources enable Google's Certificate Authority Service (CAS) support.
# They are required to access the CAS service to generate certificates for cluster resources.
###
resource "google_service_account" "google-cas-issuer" {
  count        = var.certificate_provider == "google-cas" ? 1 : 0
  account_id   = "sa-google-cas-issuer"
  display_name = "sa-google-cas-issuer"
  description  = "Service account used by GKE cert-manager's google-cas-issuer to emit certificates using Google's Certificate Authority Service (CAS)."
}

# Bind IAM policies to the "sa-google-cas-issuer" service account.
# Allow the SA to create private CA pool certificates.
resource "google_privateca_ca_pool_iam_member" "ca-pool-workload-identity" {
  count   = var.certificate_provider == "google-cas" ? 1 : 0
  ca_pool = google_privateca_ca_pool.ca_pool[0].id
  role    = "roles/privateca.certificateManager"
  member  = google_service_account.google-cas-issuer[0].member
}

# Define IAM policy for the workload identity user.
# This allows the Kubernetes service account to act as the GKE service account.
data "google_iam_policy" "google-cas-issuer" {
  count = var.certificate_provider == "google-cas" ? 1 : 0
  binding {
    role = "roles/iam.workloadIdentityUser"
    members = [
      "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[default/cert-manager-google-cas-issuer]",
    ]
  }
}

resource "google_service_account_iam_policy" "google-cas-issuer" {
  count              = var.certificate_provider == "google-cas" ? 1 : 0
  service_account_id = google_service_account.google-cas-issuer[0].id
  policy_data        = data.google_iam_policy.google-cas-issuer[0].policy_data
  depends_on         = [google_container_cluster.cloud-robotics]
}
================================================
FILE: src/bootstrap/cloud/terraform/www.yaml
================================================
# Minimal OpenAPI (Swagger 2.0) spec for the Cloud Endpoints service.
# Rendered by templatefile() in endpoints.tf, which substitutes
# ${GCP_PROJECT_ID} and ${INGRESS_IP}.
swagger: "2.0"
info:
  version: "1.0.0"
  title: Cloud Robotics APIs
  description: Example of the bare minimum Swagger spec
host: www.endpoints.${GCP_PROJECT_ID}.cloud.goog
x-google-endpoints:
- name: "www.endpoints.${GCP_PROJECT_ID}.cloud.goog"
  target: "${INGRESS_IP}"
paths:
  /:
    get:
      operationId: getAll
      responses:
        "200":
          description: OK
================================================
FILE: src/bootstrap/robot/BUILD.bazel
================================================
# Make setup_robot.sh visible to other packages, e.g. so the Terraform config
# in src/bootstrap/cloud/terraform/gcs.tf can upload it to GCS.
exports_files([
    "setup_robot.sh",
])
================================================
FILE: src/bootstrap/robot/setup_robot.sh
================================================
#!/bin/bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is a convenience wrapper for starting the setup-robot container, i.e., for doing
# "kubectl run ... --image=...setup-robot...".
set -e
set -o pipefail

# kc: run kubectl against the context in KUBE_CONTEXT (defaulted further below).
function kc {
  kubectl --context="${KUBE_CONTEXT}" "$@"
}

# faketty: run a command under script(1) so it gets a pseudo-TTY; used for
# "kubectl run -it" when this script's stdin is not a terminal.
function faketty {
  # Run command inside a TTY.
  script -qfec "$(printf "%q " "$@")" /dev/null
}
# --- Argument handling -------------------------------------------------------
# Legacy invocation was "setup_robot.sh <project> <robot-name> [<robot-type>]";
# detect it and rewrite to the flag-based form after warning the user.
if [[ ! "$*" =~ "--project" && $# -ge 2 ]] ; then
  echo "WARNING: using only positional arguments for setup_robot.sh is deprecated." >&2
  echo " Please use the following invocation instead. Setup continues in 60 seconds..." >&2
  echo " setup-robot <robot-name> --project <project-id> \\" >&2
  echo " [--robot-type <robot-type>] [--app-management]" >&2
  # Sleep, as the warning can't be seen after the helm output fills the screen.
  sleep 60
  # Rewrite parameters to new usage.
  set -- "$2" --project "$1" --robot-type "${3:-}"
fi

# Extract the project from the command-line args. It is required to identify the reference for the
# setup-robot image. This is challenging as --project is an option, so we have to do some
# rudimentary CLI parameter parsing.
for i in $(seq 1 $#) ; do
  if [[ "${!i}" == "--project" ]] ; then
    j=$((i+1))
    PROJECT=${!j}
  fi
done
if [[ -z "$PROJECT" ]] ; then
  echo "ERROR: --project is required" >&2
  exit 1
fi

# Default to the kubeadm admin context if the caller didn't specify one.
if [[ -z "${KUBE_CONTEXT}" ]] ; then
  KUBE_CONTEXT=kubernetes-admin@kubernetes
fi

# Obtain a GCP access token: from ACCESS_TOKEN_FILE if set, otherwise
# interactively from the user.
if [[ -n "$ACCESS_TOKEN_FILE" ]]; then
  # Quoted to avoid word splitting/globbing of the filename (SC2086).
  ACCESS_TOKEN=$(cat "${ACCESS_TOKEN_FILE}")
fi
if [[ -z "$ACCESS_TOKEN" ]]; then
  echo "Generate access token with gcloud:"
  echo " gcloud auth application-default print-access-token"
  echo "Enter access token:"
  # -r: don't treat backslashes in the token as escape characters (SC2162).
  read -r ACCESS_TOKEN
fi
if [[ -z "${HOST_HOSTNAME}" ]] ; then
  HOST_HOSTNAME=$(hostname)
fi

# --- Resolve the setup-robot image -------------------------------------------
# Full reference to the setup-robot image, published to GCS by the cloud
# Terraform (see src/bootstrap/cloud/terraform/gcs.tf).
IMAGE_REFERENCE=$(curl -fsSL -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "https://storage.googleapis.com/${PROJECT}-robot/setup_robot_image_reference.txt") || \
  IMAGE_REFERENCE=""
CRC_VERSION=$(curl -fsSL -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "https://storage.googleapis.com/${PROJECT}-robot/setup_robot_crc_version.txt") || \
  CRC_VERSION=""
if [[ -z "$IMAGE_REFERENCE" ]] ; then
  echo "ERROR: failed to get setup_robot_image_reference.txt from GCS" >&2
  exit 1
fi

# Extract registry from IMAGE_REFERENCE. E.g.:
# IMAGE_REFERENCE = "eu.gcr.io/my-project/setup-robot@sha256:07...5465244d"
# REGISTRY = "eu.gcr.io/my-project"
# REGISTRY_DOMAIN = "eu.gcr.io"
REGISTRY=${IMAGE_REFERENCE%/*}
REGISTRY_DOMAIN=${IMAGE_REFERENCE%%/*}

if [[ "$SKIP_LOCAL_PULL" != "true" && "$REGISTRY" != "gcr.io/cloud-robotics-releases" ]] ; then
  # The user has built setup-robot from source and pushed it to a private
  # registry. If so, k8s may not yet have credentials that can pull from a
  # private registry, so do it directly.
  echo "Pulling image from ${REGISTRY_DOMAIN}..."
  private_registry_enabled=0
  if hash docker &> /dev/null ; then
    # Quote the token so it reaches docker login as a single word.
    echo "${ACCESS_TOKEN}" | docker login -u oauth2accesstoken --password-stdin "https://${REGISTRY_DOMAIN}" || true
    if docker pull "${IMAGE_REFERENCE}"; then
      private_registry_enabled=1
    else
      docker logout "https://${REGISTRY_DOMAIN}"
      echo "WARNING: failed to pull setup-robot image using 'docker pull'" >&2
    fi
    docker logout "https://${REGISTRY_DOMAIN}"
  fi
  if hash crictl &> /dev/null ; then
    if crictl pull --creds "oauth2accesstoken:${ACCESS_TOKEN}" "${IMAGE_REFERENCE}" ; then
      private_registry_enabled=1
    else
      echo "WARNING: failed to pull setup-robot image using 'crictl pull'" >&2
    fi
  fi
  if [[ $private_registry_enabled == "0" ]]; then
    # This branch is reached both when neither tool exists and when the pulls
    # failed, so the message covers both cases.
    echo "ERROR: failed to pull the setup-robot image with 'docker' or 'crictl'. One of" >&2
    echo " these tools must be present and able to pull when Cloud Robotics Core" >&2
    echo " was deployed from source." >&2
    exit 1
  fi
fi

# Wait for creation of the default service account.
# https://github.com/kubernetes/kubernetes/issues/66689
i=0
until kc get serviceaccount default &>/dev/null; do
  sleep 1
  i=$((i + 1))
  if ((i >= 60)) ; then
    # Try again, without suppressing stderr this time.
    if ! kc get serviceaccount default >/dev/null; then
      echo "ERROR: 'kubectl get serviceaccount default' failed" >&2
      exit 1
    fi
  fi
done

# Remove old unmanaged cert
if ! kc get secrets cluster-authority --ignore-not-found -o yaml | grep -q "cert-manager.io/certificate-name: selfsigned-ca"; then
  kc delete secrets cluster-authority 2> /dev/null || true
fi
# Remove legacy helm resources
kc -n kube-system delete deploy tiller-deploy 2> /dev/null || true
kc -n kube-system delete service tiller-deploy 2> /dev/null || true
kc -n kube-system delete clusterrolebinding tiller 2> /dev/null || true
kc -n kube-system delete sa tiller 2> /dev/null || true
# "|| true" added for consistency with the other best-effort deletions, so a
# failure here cannot abort the script under "set -e".
kc -n kube-system delete cm -l OWNER=TILLER 2> /dev/null || true
# Cleanup old resources
kc -n default delete secret robot-master-tls 2> /dev/null || true
# Remove previous instance, in case installation was canceled
kc delete pod setup-robot 2> /dev/null || true

# Run the setup-robot pod interactively, passing the remaining CLI arguments
# through; faketty provides the pseudo-TTY that "-it" needs.
faketty kubectl --context "${KUBE_CONTEXT}" run setup-robot --restart=Never -it --rm \
  --pod-running-timeout=3m \
  --image="${IMAGE_REFERENCE}" \
  --env="ACCESS_TOKEN=${ACCESS_TOKEN}" \
  --env="REGISTRY=${REGISTRY}" \
  --env="HOST_HOSTNAME=${HOST_HOSTNAME}" \
  --env="CRC_VERSION=${CRC_VERSION}" \
  -- "$@"
================================================
FILE: src/go/cmd/app-rollout-controller/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])

# Go library containing the app-rollout-controller main package.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/app-rollout-controller",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "//src/go/pkg/controller/approllout:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_client_go//kubernetes/scheme:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_helm//pkg/chartutil:go_default_library",
        "@io_k8s_helm//pkg/strvals:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/healthz:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/log:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/log/zap:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/manager:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/manager/signals:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/metrics/server:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/webhook:go_default_library",
    ],
)

# Controller binary built from the library above.
go_binary(
    name = "app-rollout-controller-app",
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)

# Tarball layer containing the binary, for the OCI image below.
pkg_tar(
    name = "app-rollout-controller-image-layer",
    srcs = [":app-rollout-controller-app"],
    extension = "tar.gz",
)

# Distroless container image that runs the controller binary.
oci_image(
    name = "app-rollout-controller-image",
    base = "@distroless_base",
    entrypoint = ["/app-rollout-controller-app"],
    tars = [":app-rollout-controller-image-layer"],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/app-rollout-controller/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Runs the app rollout controller which creates and deletes Kubernetes deployments
// to bring them into agreement with configuration.
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
registry "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/controller/approllout"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/strvals"
"sigs.k8s.io/controller-runtime/pkg/healthz"
ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// Command-line flags for the app-rollout-controller.
var (
	params      = flag.String("params", "", "Helm configuration parameters formatted as name=value,topname.subname=value")
	healthzPort = flag.Int("healthz-port", 8080, "Listening port of the /healthz probe")
	webhookPort = flag.Int("webhook-port", 9876, "Listening port of the custom resource webhook")
	certDir     = flag.String("cert-dir", "", "Directory for TLS certificates")
	logLevel    = flag.Int("log-level", int(slog.LevelInfo), "the log message level required to be logged")
)
// main parses flags, sets up structured logging, loads the in-cluster
// Kubernetes config and Helm parameters, and runs the controller until it
// terminates.
func main() {
	flag.Parse()

	logHandler := ilog.NewLogHandler(slog.Level(*logLevel), os.Stderr)
	slog.SetDefault(slog.New(logHandler))

	ctx := context.Background()
	kubernetesConfig, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("Failed to initialize Kubernetes config", ilog.Err(err))
		os.Exit(1)
	}
	helmParams, err := strvals.ParseString(*params)
	if err != nil {
		slog.Error("invalid Helm parameters", ilog.Err(err))
		os.Exit(1)
	}
	// Log the error captured by the condition. The previous code invoked
	// runController a second time inside the log call, which would have
	// started another controller instance just to produce a log value.
	if err := runController(ctx, kubernetesConfig, helmParams); err != nil {
		slog.Error("Exit", ilog.Err(err))
		os.Exit(1)
	}
	slog.Info("Exit")
}
// runController wires up and starts the AppRollout controller manager.
// cfg is the in-cluster REST config; params are the Helm-style values made
// available to app chart expansion. Blocks until a termination signal.
func runController(ctx context.Context, cfg *rest.Config, params map[string]interface{}) error {
	ctrllog.SetLogger(zap.New())

	sc := runtime.NewScheme()
	// Surface scheme-registration failures instead of silently ignoring
	// them; a scheme missing types would only fail later in obscure ways.
	if err := scheme.AddToScheme(sc); err != nil {
		return errors.Wrap(err, "add Kubernetes types to scheme")
	}
	if err := apps.AddToScheme(sc); err != nil {
		return errors.Wrap(err, "add apps types to scheme")
	}
	if err := registry.AddToScheme(sc); err != nil {
		return errors.Wrap(err, "add registry types to scheme")
	}

	mgr, err := manager.New(cfg, manager.Options{
		Scheme:                 sc,
		WebhookServer:          webhook.NewServer(webhook.Options{CertDir: *certDir, Port: *webhookPort}),
		Metrics:                metricsserver.Options{BindAddress: "0"}, // disabled
		HealthProbeBindAddress: fmt.Sprintf(":%d", *healthzPort),
	})
	if err != nil {
		return errors.Wrap(err, "create controller manager")
	}
	if err := approllout.Add(ctx, mgr, chartutil.Values(params)); err != nil {
		return errors.Wrap(err, "add AppRollout controller")
	}
	if err := mgr.AddHealthzCheck("trivial", healthz.Ping); err != nil {
		return errors.Wrap(err, "add healthz check")
	}
	// Validating webhooks for the App and AppRollout custom resources.
	srv := mgr.GetWebhookServer()
	srv.Register("/approllout/validate", approllout.NewAppRolloutValidationWebhook(mgr))
	srv.Register("/app/validate", approllout.NewAppValidationWebhook(mgr))
	return mgr.Start(signals.SetupSignalHandler())
}
================================================
FILE: src/go/cmd/chart-assignment-controller/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = ["main.go"],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/chart-assignment-controller",
visibility = ["//visibility:private"],
deps = [
"//src/go/pkg/apis/apps/v1alpha1:go_default_library",
"//src/go/pkg/controller/chartassignment:go_default_library",
"@com_github_googlecloudrobotics_ilog//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_k8s_apimachinery//pkg/runtime:go_default_library",
"@io_k8s_client_go//kubernetes/scheme:go_default_library",
"@io_k8s_client_go//rest:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/healthz:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/log:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/log/zap:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/manager:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/manager/signals:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/metrics/server:go_default_library",
"@io_k8s_sigs_controller_runtime//pkg/webhook:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@io_opencensus_go_contrib_exporter_stackdriver//:go_default_library",
],
)
genrule(
name = "helm-config",
outs = ["helm-config.tar"],
# To make the rule deterministic
# - Remove the "generated" timestamp in repositories.yaml.
# - Add some extra tar flags.
cmd = "HELM_HOME=$$PWD/.helm $(location @kubernetes_helm//:helm) >/dev/null init --client-only --skip-repos " +
"&& sed -i '/generated:/d' .helm/repository/repositories.yaml " +
"&& tar --owner=root --group=root --numeric-owner --mtime='2010-01-01' --create --file $@ .helm",
output_to_bindir = True,
tools = ["@kubernetes_helm//:helm"],
)
oci_image(
name = "helm-image",
base = "@distroless_cc",
tars = [":helm-image-layer"],
)
pkg_tar(
name = "helm-image-layer",
extension = "tar.gz",
package_dir = "/home/nonroot/",
deps = [":helm-config"],
)
go_binary(
name = "chart-assignment-controller-app",
embed = [":go_default_library"],
)
pkg_tar(
name = "chart-assignment-controller-layer",
srcs = [":chart-assignment-controller-app"],
extension = "tar.gz",
)
oci_image(
name = "chart-assignment-controller-image",
base = ":helm-image",
entrypoint = ["/chart-assignment-controller-app"],
tars = [":chart-assignment-controller-layer"],
)
================================================
FILE: src/go/cmd/chart-assignment-controller/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main defines the entry point for the chart assignment controller service.
//
// Ensures selected apps are running on the robot.
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
"contrib.go.opencensus.io/exporter/stackdriver"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/controller/chartassignment"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/healthz"
ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
// Command-line flags for the chart-assignment-controller.
var (
	cloudCluster         = flag.Bool("cloud-cluster", true, "Is the controller deployed in cloud cluster")
	healthzPort          = flag.Int("healthz-port", 8080, "Listening port of the /healthz probe")
	webhookEnabled       = flag.Bool("webhook-enabled", true, "Whether the webhook should be served")
	webhookPort          = flag.Int("webhook-port", 9876, "Listening port of the custom resource webhook")
	certDir              = flag.String("cert-dir", "", "Directory for TLS certificates")
	stackdriverProjectID = flag.String("trace-stackdriver-project-id", "", "If not empty, traces will be uploaded to this Google Cloud Project. Not relevant for cloud cluster")
	maxQPS               = flag.Int("apiserver-max-qps", 50, "Maximum number of calls to the API server per second.")
	logLevel             = flag.Int("log-level", int(slog.LevelInfo), "the log message level required to be logged")
)
// main configures logging and tracing, determines the cluster name
// (either "cloud" or the value of ROBOT_NAME on a robot), loads the
// in-cluster config with rate limits, and runs the controller.
func main() {
	flag.Parse()

	logHandler := ilog.NewLogHandler(slog.Level(*logLevel), os.Stderr)
	slog.SetDefault(slog.New(logHandler))
	ctx := context.Background()

	// Stackdriver trace export is only set up outside the cloud cluster.
	if *stackdriverProjectID != "" && !*cloudCluster {
		sd, err := stackdriver.NewExporter(stackdriver.Options{
			ProjectID: *stackdriverProjectID,
		})
		if err != nil {
			slog.Error("Failed to create the Stackdriver exporter", ilog.Err(err))
			os.Exit(1)
		}
		trace.RegisterExporter(sd)
		trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
		defer sd.Flush()
	}

	var clusterName string
	if *cloudCluster {
		clusterName = "cloud"
		slog.Info("Starting chart-assignment-controller in cloud setup")
	} else {
		clusterName = os.Getenv("ROBOT_NAME")
		// Validate before logging the startup message so we never report a
		// successful start with an empty cluster name. (Also fixes grammar:
		// "a non-empty".)
		if clusterName == "" {
			slog.Error("expect ROBOT_NAME environment var to be set to a non-empty string")
			os.Exit(1)
		}
		// Fixed typo: "chart-assigment" -> "chart-assignment".
		slog.Info("Starting chart-assignment-controller in robot setup", slog.String("Cluster", clusterName))
	}

	config, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("Failed to load config", ilog.Err(err))
		os.Exit(1)
	}
	config.QPS = float32(*maxQPS)
	// The default value of twice the max QPS seems to work well.
	config.Burst = *maxQPS * 2

	if err := runController(ctx, config, clusterName); err != nil {
		slog.Error("Controller terminated", ilog.Err(err))
		os.Exit(1)
	}
	slog.Info("Controller finished")
}
// runController wires up and starts the ChartAssignment controller manager.
// cfg is the in-cluster REST config. NOTE(review): the cluster parameter is
// currently unused in this function body — kept for interface compatibility;
// confirm whether callers rely on it elsewhere.
func runController(ctx context.Context, cfg *rest.Config, cluster string) error {
	ctrllog.SetLogger(zap.New())

	sc := runtime.NewScheme()
	// Surface scheme-registration failures instead of silently ignoring them.
	if err := scheme.AddToScheme(sc); err != nil {
		return errors.Wrap(err, "add Kubernetes types to scheme")
	}
	if err := apps.AddToScheme(sc); err != nil {
		return errors.Wrap(err, "add apps types to scheme")
	}

	mgr, err := manager.New(cfg, manager.Options{
		Scheme:                 sc,
		WebhookServer:          webhook.NewServer(webhook.Options{CertDir: *certDir, Port: *webhookPort}),
		Metrics:                metricsserver.Options{BindAddress: "0"}, // disabled
		HealthProbeBindAddress: fmt.Sprintf(":%d", *healthzPort),
	})
	if err != nil {
		return errors.Wrap(err, "create controller manager")
	}
	if err := chartassignment.Add(ctx, mgr, *cloudCluster); err != nil {
		return errors.Wrap(err, "add ChartAssignment controller")
	}
	if err := mgr.AddHealthzCheck("trivial", healthz.Ping); err != nil {
		return errors.Wrap(err, "add healthz check")
	}
	if *webhookEnabled {
		// Renamed local from "webhook" to avoid shadowing the imported
		// controller-runtime "webhook" package.
		validation := chartassignment.NewValidationWebhook(mgr)
		srv := mgr.GetWebhookServer()
		srv.Register("/chartassignment/validate", validation)
	}
	return mgr.Start(signals.SetupSignalHandler())
}
================================================
FILE: src/go/cmd/cr-syncer/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = [
"health.go",
"main.go",
"syncer.go",
],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/cr-syncer",
visibility = ["//visibility:private"],
deps = [
"//src/go/pkg/robotauth:go_default_library",
"@com_github_googlecloudrobotics_ilog//:go_default_library",
"@com_github_motemen_go_loghttp//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:go_default_library",
"@io_k8s_apiextensions_apiserver//pkg/client/clientset/clientset:go_default_library",
"@io_k8s_apiextensions_apiserver//pkg/client/informers/externalversions:go_default_library",
"@io_k8s_apimachinery//pkg/api/errors:go_default_library",
"@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
"@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
"@io_k8s_apimachinery//pkg/runtime:go_default_library",
"@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
"@io_k8s_apimachinery//pkg/watch:go_default_library",
"@io_k8s_client_go//dynamic:go_default_library",
"@io_k8s_client_go//rest:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_k8s_client_go//util/workqueue:go_default_library",
"@io_k8s_klog//:go_default_library",
"@io_opencensus_go//plugin/ochttp:go_default_library",
"@io_opencensus_go//stats:go_default_library",
"@io_opencensus_go//stats/view:go_default_library",
"@io_opencensus_go//tag:go_default_library",
"@io_opencensus_go//zpages:go_default_library",
"@io_opencensus_go_contrib_exporter_prometheus//:go_default_library",
"@org_golang_x_net//context:go_default_library",
"@org_golang_x_oauth2//:go_default_library",
"@org_golang_x_oauth2//google:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"health_test.go",
"main_test.go",
"syncer_test.go",
],
embed = [":go_default_library"],
visibility = ["//visibility:private"],
deps = [
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_onsi_gomega//:go_default_library",
"@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:go_default_library",
"@io_k8s_apiextensions_apiserver//pkg/client/clientset/clientset/fake:go_default_library",
"@io_k8s_apimachinery//pkg/api/errors:go_default_library",
"@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
"@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
"@io_k8s_apimachinery//pkg/runtime:go_default_library",
"@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
"@io_k8s_apimachinery//pkg/watch:go_default_library",
"@io_k8s_client_go//dynamic/fake:go_default_library",
"@io_k8s_client_go//testing:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_k8s_client_go//util/workqueue:go_default_library",
],
)
go_binary(
name = "cr-syncer-app",
embed = [":go_default_library"],
visibility = ["//visibility:private"],
)
pkg_tar(
name = "cr-syncer-image-layer",
srcs = [":cr-syncer-app"],
extension = "tar.gz",
)
oci_image(
name = "cr-syncer-image",
base = "@distroless_base",
entrypoint = ["/cr-syncer-app"],
tars = [":cr-syncer-image-layer"],
visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/cr-syncer/health.go
================================================
package main
import (
"context"
"log/slog"
"net/http"
"github.com/googlecloudrobotics/ilog"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)
var (
	// gvr defines which resource we expect to be able to list in the
	// remote cluster. We check for robottypes as the cr-syncer may only be
	// authorized to list syncable & unfiltered resources.
	// Used by the health handler's probe list request.
	gvr = schema.GroupVersionResource{
		Group:    "registry.cloudrobotics.com",
		Version:  "v1alpha1",
		Resource: "robottypes",
	}
)
// handler handles HTTP health requests.
type handler struct {
	ctx    context.Context
	client dynamic.Interface // client used for the probe list request (the remote cluster in main)
}

// newHealthHandler returns an http.Handler that reports 500 when the given
// client's credentials are rejected by the apiserver, and 200 otherwise.
func newHealthHandler(ctx context.Context, client dynamic.Interface) http.Handler {
	return &handler{ctx, client}
}
// ServeHTTP performs a probe list request against the apiserver and reports
// unhealthy (500) only for Unauthorized errors; any other outcome — success
// or a transient failure — yields the default 200.
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// A simple health check: see if we can execute a list request against
	// the apiserver. This might block for a while or fail due to transient
	// network issues, so the liveness probe will need to be tolerant of
	// slow or flaky responses.
	//
	// If this becomes a problem, we could do the requests in the
	// background and just check the status of the latest request here.
	_, err := h.client.Resource(gvr).List(h.ctx, metav1.ListOptions{Limit: 1})
	if !k8serrors.IsUnauthorized(err) {
		return
	}
	slog.Error("failed health check", ilog.Err(err))
	http.Error(w, "unhealthy", http.StatusInternalServerError)
}
================================================
FILE: src/go/cmd/cr-syncer/health_test.go
================================================
package main
import (
"context"
"net/http"
"net/http/httptest"
"testing"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/fake"
k8stest "k8s.io/client-go/testing"
)
func TestHealthy(t *testing.T) {
client := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(),
map[schema.GroupVersionResource]string{
gvr: "RobotList",
},
)
h := newHealthHandler(context.Background(), client)
ts := httptest.NewServer(h)
defer ts.Close()
res, err := http.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != http.StatusOK {
t.Errorf("GET / returned status %d, want %d", res.StatusCode, http.StatusOK)
}
}
func TestHealthyForBadRequest(t *testing.T) {
client := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(),
map[schema.GroupVersionResource]string{
gvr: "RobotList",
},
)
// To avoid unwanted crashes, we should return "healthy" for misc errors.
client.PrependReactor("*", "*", func(k8stest.Action) (bool, runtime.Object, error) {
return true, nil, k8serrors.NewBadRequest("")
})
h := newHealthHandler(context.Background(), client)
ts := httptest.NewServer(h)
defer ts.Close()
res, err := http.Get(ts.URL + "/health")
if err != nil {
t.Fatal(err)
}
if res.StatusCode != http.StatusOK {
t.Errorf("GET / returned status %d, want %d", res.StatusCode, http.StatusOK)
}
}
func TestUnhealthy(t *testing.T) {
client := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(),
map[schema.GroupVersionResource]string{
gvr: "RobotList",
},
)
// If the token vendor gives us a bad token, we might get Unauthorized errors.
// https://github.com/googlecloudrobotics/core/issues/59
client.PrependReactor("*", "*", func(k8stest.Action) (bool, runtime.Object, error) {
return true, nil, k8serrors.NewUnauthorized("")
})
h := newHealthHandler(context.Background(), client)
ts := httptest.NewServer(h)
defer ts.Close()
res, err := http.Get(ts.URL + "/health")
if err != nil {
t.Fatal(err)
}
if res.StatusCode != http.StatusInternalServerError {
t.Errorf("GET / returned status %d, want %d", res.StatusCode, http.StatusInternalServerError)
}
}
================================================
FILE: src/go/cmd/cr-syncer/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The CR syncer syncs custom resources between a remote Kubernetes cluster and
// the local Kubernetes cluster. The spec part is copied from upstream to
// downstream, and the status part is copied from downstream to upstream.
//
// The behaviour can be customized by annotations on the CRDs.
//
// Annotation "filter-by-robot-name"
//
// cr-syncer.cloudrobotics.com/filter-by-robot-name:
//
// If true, only sync CRs that have a label 'cloudrobotics.com/robot-name:
// ' that matches the robot-name arg given on the command line.
//
// Annotation "status-subtree"
//
// cr-syncer.cloudrobotics.com/status-subtree:
//
// If specified, only sync the given subtree of the Status field. This is useful
// if resources have a shared status.
//
// Annotation "spec-source"
//
// cr-syncer.cloudrobotics.com/spec-source:
//
// If set to "cloud", the source of truth for object existence and specs
// (upstream) is the remote cluster and for status it's local (downstream).
// If set to "", the CRD is ignored.
//
// NOTE: Previously, this could be set to "robot", but support was removed as it
// was unused and the required auth setup is more complex, and would need
// changes to cr-syncer-auth-webhook to validate CR creation as well.
package main
import (
"flag"
"fmt"
"log/slog"
"net/http"
"os"
"strings"
"time"
"contrib.go.opencensus.io/exporter/prometheus"
"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
"github.com/googlecloudrobotics/ilog"
"github.com/motemen/go-loghttp"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/zpages"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
crdtypes "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
crdinformer "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
const (
	// Resync informers every 5 minutes. This will cause all current resources
	// to be sent as updates once again, which will trigger reconciliation on those
	// objects and thus fix any potential drift.
	resyncPeriod = 5 * time.Minute
)

var (
	// Command-line flags.
	remoteServer       = flag.String("remote-server", "", "Remote Kubernetes server")
	robotName          = flag.String("robot-name", "", "Robot we are running on, can be used for selective syncing")
	listenAddr         = flag.String("listen-address", ":80", "HTTP listen address")
	conflictErrorLimit = flag.Int("conflict-error-limit", 5, "Number of consecutive conflict errors before informer is restarted")
	timeout            = flag.Int64("timeout", 300, "Timeout for CR watch calls in seconds")
	useRobotJWT        = flag.Bool("use-robot-jwt", false, "Use robot JWT for authn instead of GCP access token - requires recent CRC cloud deployment")
	verbose            = flag.Bool("verbose", false, "DEPRECATED: Use log_level")
	logLevel           = flag.Int("log-level", int(slog.LevelInfo), "the log message level required to be logged")

	// Histogram bucket boundaries for HTTP request/response sizes in bytes.
	sizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 33554432)
	// Histogram bucket boundaries for HTTP client latency (units are those of
	// ochttp.ClientLatency — presumably milliseconds, confirm).
	latencyDistribution = view.Distribution(0, 1, 2, 5, 10, 15, 25, 50, 100, 200, 400, 800, 1500, 3000, 6000)
	// tagLocation distinguishes "local" vs "remote" cluster traffic in metrics.
	tagLocation = mustNewTagKey("location")
)
// init registers OpenCensus views for the HTTP client metrics (request
// count, request/response size, latency), each additionally keyed by the
// location tag so local- and remote-cluster traffic can be told apart.
// A registration failure is a programming error, hence the panic.
func init() {
	if err := view.Register(
		&view.View{
			Name:        ochttp.ClientRequestCount.Name(),
			Description: ochttp.ClientRequestCount.Description(),
			Measure:     ochttp.ClientRequestCount,
			TagKeys:     []tag.Key{ochttp.Method, tagLocation},
			Aggregation: view.Count(),
		},
		&view.View{
			Name:        ochttp.ClientRequestBytes.Name(),
			Description: ochttp.ClientRequestBytes.Description(),
			Measure:     ochttp.ClientRequestBytes,
			TagKeys:     []tag.Key{ochttp.Method, ochttp.StatusCode, tagLocation},
			Aggregation: sizeDistribution,
		},
		&view.View{
			Name:        ochttp.ClientResponseBytes.Name(),
			Description: ochttp.ClientResponseBytes.Description(),
			Measure:     ochttp.ClientResponseBytes,
			TagKeys:     []tag.Key{ochttp.Method, ochttp.StatusCode, tagLocation},
			Aggregation: sizeDistribution,
		},
		&view.View{
			Name:        ochttp.ClientLatency.Name(),
			Description: ochttp.ClientLatency.Description(),
			Measure:     ochttp.ClientLatency,
			TagKeys:     []tag.Key{ochttp.Method, ochttp.StatusCode, tagLocation},
			Aggregation: latencyDistribution,
		},
	); err != nil {
		panic(err)
	}
}
// PrefixingRoundtripper is a HTTP roundtripper that adds a specified prefix to
// all HTTP requests. We need to use it instead of setting APIPath because
// autogenerated and dynamic Kubernetes clients overwrite the REST config's
// APIPath.
type PrefixingRoundtripper struct {
Prefix string
Base http.RoundTripper
}
func (pr *PrefixingRoundtripper) RoundTrip(r *http.Request) (*http.Response, error) {
// Avoid an extra roundtrip for the protocol upgrade
r.URL.Scheme = "https"
if !strings.HasPrefix(r.URL.Path, pr.Prefix+"/") {
r.URL.Path = pr.Prefix + r.URL.Path
}
resp, err := pr.Base.RoundTrip(r)
return resp, err
}
// ctxRoundTripper injects a fixed context into all requests. This is used to
// provide static OpenCensus tags as Kubernetes' client-go provides no context hooks.
type ctxRoundTripper struct {
	base http.RoundTripper
	ctx  context.Context
}

// RoundTrip replaces the request's context with the stored one before
// delegating to the wrapped transport. NOTE(review): this discards the
// caller's per-request context (including its cancellation) — presumably
// acceptable here since the stored context only carries metric tags; confirm.
func (r *ctxRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	return r.base.RoundTrip(req.WithContext(r.ctx))
}
// restConfigForRemote assembles the K8s REST config for the remote server.
//
// Authentication uses either the robot JWT (-use-robot-jwt) or the default
// GCP token source. The returned config wraps its transport so that, from
// innermost to outermost, requests get: an OAuth bearer token, the
// "/apis/core.kubernetes" path prefix, optional verbose request logging,
// OpenCensus instrumentation, and a context tagged location=remote.
func restConfigForRemote(ctx context.Context) (*rest.Config, error) {
	var tokenSource oauth2.TokenSource
	var err error
	if *useRobotJWT {
		tokenSource = robotauth.CreateJWTSource()
	} else {
		tokenSource, err = google.DefaultTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform")
		if err != nil {
			return nil, err
		}
	}
	// All requests made through this config carry the location=remote tag.
	ctx, err = tag.New(ctx, tag.Insert(tagLocation, "remote"))
	if err != nil {
		return nil, err
	}
	transport := func(base http.RoundTripper) (rt http.RoundTripper) {
		// Innermost wrapper: attach the bearer token.
		rt = &oauth2.Transport{
			Source: tokenSource,
			Base:   base,
		}
		// Prefix the path — NOTE(review): presumably the relay endpoint on
		// the cloud cluster; confirm against the deployment.
		rt = &PrefixingRoundtripper{
			Prefix: "/apis/core.kubernetes",
			Base:   rt,
		}
		if *verbose {
			rt = &loghttp.Transport{Transport: rt}
		}
		rt = &ochttp.Transport{Base: rt}
		return &ctxRoundTripper{base: rt, ctx: ctx}
	}
	return &rest.Config{
		Host:          *remoteServer,
		APIPath:       "/apis",
		WrapTransport: transport,
		// The original value of timeout is set in the options of lister and watcher in newInformer function. This timeout is not enforced by the client.
		// That's the reason for the timeout in REST config. It is set to timeout + 5 seconds to give some time for a graceful closing of the connection.
		Timeout: time.Second * (time.Duration(*timeout) + 5),
	}, nil
}
// CrdChange is a single watch event on a CustomResourceDefinition in the
// local cluster, as emitted by streamCrds.
type CrdChange struct {
	Type watch.EventType
	CRD  *crdtypes.CustomResourceDefinition
}
// streamCrds forwards CRD add/update/delete events from the given clientset
// onto the crds channel until done is closed.
//
// It blocks until the informer cache has synced, then returns; delivery of
// events continues on the informer's goroutines. The event handler is
// registered after WaitForCacheSync — pre-existing objects are still
// delivered as Add events to the late-added handler (demonstrated by
// TestStreamCrdsSeesPreexistingObject).
func streamCrds(done <-chan struct{}, clientset crdclientset.Interface, crds chan<- CrdChange) error {
	factory := crdinformer.NewSharedInformerFactory(clientset, 0)
	informer := factory.Apiextensions().V1().CustomResourceDefinitions().Informer()

	go informer.Run(done)

	slog.Info("Syncing cache for CRDs")
	ok := cache.WaitForCacheSync(done, informer.HasSynced)
	if !ok {
		return fmt.Errorf("WaitForCacheSync failed")
	}
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			crds <- CrdChange{Type: watch.Added, CRD: obj.(*crdtypes.CustomResourceDefinition)}
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			crds <- CrdChange{Type: watch.Modified, CRD: newObj.(*crdtypes.CustomResourceDefinition)}
		},
		DeleteFunc: func(obj interface{}) {
			crds <- CrdChange{Type: watch.Deleted, CRD: obj.(*crdtypes.CustomResourceDefinition)}
		},
	})
	return nil
}
// main wires up the cr-syncer: it builds dynamic clients for the local and
// remote clusters, serves metrics/zpages/health endpoints, and then runs
// one crSyncer per CRD, tearing down and recreating syncers as CRDs change.
func main() {
	klog.InitFlags(nil)
	flag.Parse()
	ctx := context.Background()

	// -verbose is deprecated but still forces debug-level logging.
	ll := slog.Level(*logLevel)
	if *verbose {
		ll = slog.LevelDebug
	}
	logHandler := ilog.NewLogHandler(ll, os.Stderr)
	slog.SetDefault(slog.New(logHandler))

	localConfig, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("InClusterConfig", ilog.Err(err))
		os.Exit(1)
	}
	// Metrics from local-cluster traffic carry the tag location=local.
	localCtx, err := tag.New(ctx, tag.Insert(tagLocation, "local"))
	if err != nil {
		slog.Error("tag.New", ilog.Err(err))
		os.Exit(1)
	}
	// Instrument the local transport: optional request logging, OpenCensus
	// stats, and the static location tag.
	localConfig.WrapTransport = func(base http.RoundTripper) http.RoundTripper {
		if *verbose {
			base = &loghttp.Transport{Transport: base}
		}
		base = &ochttp.Transport{Base: base}
		return &ctxRoundTripper{base: base, ctx: localCtx}
	}
	local, err := dynamic.NewForConfig(localConfig)
	if err != nil {
		slog.Error("NewForConfig", ilog.Err(err))
		os.Exit(1)
	}
	remoteConfig, err := restConfigForRemote(ctx)
	if err != nil {
		slog.Error("restConfigForRemote", ilog.Err(err))
		os.Exit(1)
	}
	remote, err := dynamic.NewForConfig(remoteConfig)
	if err != nil {
		slog.Error("NewForConfig", ilog.Err(err))
		os.Exit(1)
	}
	// Serve Prometheus metrics on /metrics, zpages under /debug, and the
	// remote-cluster health probe on /health.
	exporter, err := prometheus.NewExporter(prometheus.Options{})
	if err != nil {
		slog.Error("NewExporter", ilog.Err(err))
		os.Exit(1)
	}
	view.RegisterExporter(exporter)
	view.SetReportingPeriod(time.Second)
	zpages.Handle(nil, "/debug")
	http.Handle("/metrics", exporter)
	http.Handle("/health", newHealthHandler(ctx, remote))
	go func() {
		if err := http.ListenAndServe(*listenAddr, nil); err != nil {
			slog.Error("ListenAndServe", ilog.Err(err))
			os.Exit(1)
		}
	}()
	crds := make(chan CrdChange)
	// ctx is never cancelled, so CRD streaming runs for the process lifetime.
	if err := streamCrds(ctx.Done(), crdclientset.NewForConfigOrDie(localConfig), crds); err != nil {
		slog.Error("Unable to stream CRDs from local Kubernetes", ilog.Err(err))
		os.Exit(1)
	}
	// One running crSyncer per CRD name.
	syncers := make(map[string]*crSyncer)
	for crd := range crds {
		name := crd.CRD.GetName()
		// Stop and discard any existing syncer for this CRD before
		// (possibly) starting a fresh one below.
		if cur, ok := syncers[name]; ok {
			if crd.Type == watch.Added {
				slog.Warn("Already had a running sync", slog.String("syncer", name))
			}
			cur.stop()
			delete(syncers, name)
		}
		if crd.Type == watch.Added || crd.Type == watch.Modified {
			// The modify procedure is very heavyweight: We throw away
			// the informer for the CRD (read: all cached data) on every
			// modification and recreate it. If that ever turns out to
			// be a problem, we should use a shared informer cache
			// instead.
			s, err := newCRSyncer(ctx, *crd.CRD, local, remote, *robotName)
			if err != nil {
				// errIgnoredCRD marks CRDs the syncer deliberately skips.
				if err != errIgnoredCRD {
					slog.Error("skipping custom resource", slog.String("Resource", name), ilog.Err(err))
				}
				continue
			}
			syncers[name] = s
			go s.run()
		}
	}
}
// mustNewTagKey creates an OpenCensus tag key from s and panics on failure;
// intended for package-level initialization of fixed key names.
func mustNewTagKey(s string) tag.Key {
	key, err := tag.NewKey(s)
	if err != nil {
		panic(err)
	}
	return key
}
================================================
FILE: src/go/cmd/cr-syncer/main_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"sync"
"testing"
"time"
. "github.com/onsi/gomega"
crdtypes "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
fakecrdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
)
// TestStreamCrdsSeesPreexistingObject verifies that a CRD that already exists
// before streamCrds is called is delivered as an Add event.
func TestStreamCrdsSeesPreexistingObject(t *testing.T) {
	g := NewGomegaWithT(t)
	existing := []runtime.Object{
		&crdtypes.CustomResourceDefinition{
			ObjectMeta: metav1.ObjectMeta{
				Name:            "foo",
				ResourceVersion: "1",
			},
		},
	}
	cs := fakecrdclientset.NewSimpleClientset(existing...)

	crds := make(chan CrdChange)
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer close(done)
		select {
		case crd := <-crds:
			g.Expect(crd.Type).To(Equal(watch.Added))
			g.Expect(crd.CRD.GetName()).To(Equal("foo"))
		case <-time.After(15 * time.Second):
			t.Errorf("Received no watch event; wanted add for foo")
		}
	}()

	if err := streamCrds(done, cs, crds); err != nil {
		t.Errorf("Got unexpected error: %v", err)
	}
	wg.Wait()
}
// TestStreamCrdsSeesAdditionAndDeletion verifies that creating and then
// deleting a CRD produces an Added and then a Deleted event.
func TestStreamCrdsSeesAdditionAndDeletion(t *testing.T) {
	ctx := context.Background()
	g := NewGomegaWithT(t)
	cs := fakecrdclientset.NewSimpleClientset()
	crds := make(chan CrdChange)
	done := make(chan struct{})
	defer close(done)
	if err := streamCrds(done, cs, crds); err != nil {
		t.Errorf("Got unexpected error: %v", err)
	}
	// expectEvent waits for the next CRD change and checks type and name.
	expectEvent := func(wantType watch.EventType, wantName, timeoutMsg string) {
		select {
		case crd := <-crds:
			g.Expect(crd.Type).To(Equal(wantType))
			g.Expect(crd.CRD.GetName()).To(Equal(wantName))
		case <-time.After(15 * time.Second):
			t.Error(timeoutMsg)
		}
	}

	cs.ApiextensionsV1().CustomResourceDefinitions().Create(ctx,
		&crdtypes.CustomResourceDefinition{
			ObjectMeta: metav1.ObjectMeta{
				Name: "later",
			},
		},
		metav1.CreateOptions{})
	expectEvent(watch.Added, "later", "Received no watch event; wanted add for later")

	cs.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, "later", metav1.DeleteOptions{})
	expectEvent(watch.Deleted, "later", "Received no watch event; wanted deleted for later")
}
// TestStreamCrdsSeesUpdate checks that updating an existing CRD produces a
// Modified watch event.
func TestStreamCrdsSeesUpdate(t *testing.T) {
	ctx := context.Background()
	g := NewGomegaWithT(t)
	cs := fakecrdclientset.NewSimpleClientset()
	crds := make(chan CrdChange)
	done := make(chan struct{})
	defer close(done)
	if err := streamCrds(done, cs, crds); err != nil {
		t.Errorf("Got unexpected error: %v", err)
	}
	// expectEvent waits for the next change and asserts its type and name.
	expectEvent := func(want watch.EventType, msg string) {
		select {
		case crd := <-crds:
			g.Expect(crd.Type).To(Equal(want))
			g.Expect(crd.CRD.GetName()).To(Equal("later"))
		case <-time.After(15 * time.Second):
			t.Errorf("%s", msg)
		}
	}
	cs.ApiextensionsV1().CustomResourceDefinitions().Create(ctx,
		&crdtypes.CustomResourceDefinition{
			ObjectMeta: metav1.ObjectMeta{Name: "later"},
		},
		metav1.CreateOptions{})
	expectEvent(watch.Added, "Received no watch event; wanted add for later")
	cs.ApiextensionsV1().CustomResourceDefinitions().Update(ctx,
		&crdtypes.CustomResourceDefinition{
			ObjectMeta: metav1.ObjectMeta{
				Name: "later",
				Annotations: map[string]string{
					"foo": "bar",
				},
			},
		},
		metav1.UpdateOptions{})
	expectEvent(watch.Modified, "Received no watch event; wanted modified for later")
}
================================================
FILE: src/go/cmd/cr-syncer/syncer.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"log/slog"
"net/http"
"os"
"strconv"
"time"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
crdtypes "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
// Annotations attached to CRDs.
annotationStatusSubtree = "cr-syncer.cloudrobotics.com/status-subtree"
annotationFilterByRobotName = "cr-syncer.cloudrobotics.com/filter-by-robot-name"
annotationSpecSource = "cr-syncer.cloudrobotics.com/spec-source"
// Annotations and labels attached to CRs.
labelRobotName = "cloudrobotics.com/robot-name"
// Annotation for remote resource version. Note that for resources in
// the cloud cluster, this is a resource version on the robot's cluster
// (and vice versa). This will only be set when the status subresource
// is disabled, otherwise the status and annotation cannot be updated
// in a single request.
annotationResourceVersion = "cr-syncer.cloudrobotics.com/remote-resource-version"
cloudClusterName = "cloud"
)
var (
mSyncs = stats.Int64(
"cr-syncer.cloudrobotics.com/syncs",
"Synchronizations triggered by resource events",
stats.UnitDimensionless,
)
mSyncErrors = stats.Int64(
"cr-syncer.cloudrobotics.com/sync_errors",
"Synchronization errors on resource events",
stats.UnitDimensionless,
)
tagEventSource = mustNewTagKey("event_source")
tagResource = mustNewTagKey("resource")
// errIgnoredCRD indicates that the spec-source label is missing or empty
// and this CRD should be ignored.
errIgnoredCRD = errors.New("this CRD is not synced")
)
// init registers the OpenCensus views aggregating the sync and sync-error
// counters, tagged by event source and resource.
func init() {
	views := []*view.View{
		{
			Name:        "cr-syncer.cloudrobotics.com/syncs_total",
			Description: "Total number of synchronizations triggered resource events",
			Measure:     mSyncs,
			TagKeys:     []tag.Key{tagEventSource, tagResource},
			Aggregation: view.Count(),
		},
		{
			Name:        "cr-syncer.cloudrobotics.com/sync_errors_total",
			Description: "Total number of synchronizations errors on resource events",
			Measure:     mSyncErrors,
			TagKeys:     []tag.Key{tagEventSource, tagResource},
			Aggregation: view.Count(),
		},
	}
	if err := view.Register(views...); err != nil {
		panic(err)
	}
}
// removeFinalizer drops this robot's cr-syncer finalizer from obj and writes
// the object back, if the finalizer was present. Finalizers for offline
// robots have to be removed manually (eg with `kubectl edit`).
// TODO(rodrigoq): remove after migration
func removeFinalizer(ctx context.Context, client dynamic.ResourceInterface, obj *unstructured.Unstructured, clusterName string) {
	thisFinalizer := fmt.Sprintf("%s.synced.cr-syncer.cloudrobotics.com", clusterName)
	old := obj.GetFinalizers()
	remaining := make([]string, 0, len(old))
	for _, f := range old {
		if f != thisFinalizer {
			remaining = append(remaining, f)
		}
	}
	if len(remaining) == len(old) {
		// Our finalizer was not present; nothing to update.
		return
	}
	obj.SetFinalizers(remaining)
	if _, err := client.Update(ctx, obj, metav1.UpdateOptions{}); err != nil {
		if isNotFoundError(err) {
			// Object is already gone; the finalizer is moot.
			return
		}
		slog.Error("failed to remove finalizers", ilog.Err(err))
	}
}
// crSyncer synchronizes custom resources from an upstream source cluster to a
// downstream cluster.
// Updates to the status subresource in the downstream are propagated back to
// the upstream cluster.
type crSyncer struct {
	ctx         context.Context
	clusterName string // Name of downstream cluster.
	crd         crdtypes.CustomResourceDefinition
	upstream    dynamic.ResourceInterface // Source of the spec.
	downstream  dynamic.ResourceInterface // Source of the status.
	// labelSelector optionally restricts list/watch to CRs labeled for this
	// robot (see annotationFilterByRobotName in newCRSyncer).
	labelSelector string
	// subtree optionally names a single status subtree to sync instead of
	// the whole status (see annotationStatusSubtree).
	subtree string
	// versionIx is the index of the storage version in crd.Spec.Versions.
	versionIx int

	// Informers and the queues they feed. Upstream/downstream describes
	// the source of the change events, _not_ the direction they are heading.
	// For example, upstream{Inf,Queue} receive updates that will result in the
	// syncer taking actions against the downstream cluster.
	upstreamInf     cache.SharedIndexInformer
	downstreamInf   cache.SharedIndexInformer
	upstreamQueue   workqueue.RateLimitingInterface
	downstreamQueue workqueue.RateLimitingInterface

	// infDone stops the informers when closed; nil while they are stopped.
	infDone chan struct{}
	// conflictErrors counts consecutive HTTP 409 responses; informers are
	// restarted when it reaches *conflictErrorLimit (see processNextWorkItem).
	conflictErrors int

	done chan struct{} // Terminates all background processes.
}
// getStorageVersionIndex returns the index into crd.Spec.Versions of the
// version marked as the storage version. It returns an error if no version
// has Storage set, which makes the CRD unusable for syncing.
func getStorageVersionIndex(crd crdtypes.CustomResourceDefinition) (int, error) {
	for ix, v := range crd.Spec.Versions {
		if v.Storage {
			return ix, nil
		}
	}
	// Error strings are lowercase per Go convention; "storage" matches the
	// actual CustomResourceDefinitionVersion field name.
	return 0, fmt.Errorf("invalid CustomResourceDefinition %s: no version with storage=true set", crd.ObjectMeta.Name)
}
// newCRSyncer constructs a crSyncer for crd. local is the client for the
// robot cluster and remote the client for the cloud cluster. It returns
// errIgnoredCRD when the spec-source annotation is absent/empty (the CRD is
// not synced), and an error for an unknown spec source or a CRD without a
// storage version.
func newCRSyncer(
	ctx context.Context,
	crd crdtypes.CustomResourceDefinition,
	local, remote dynamic.Interface,
	robotName string,
) (*crSyncer, error) {
	var (
		annotations        = crd.ObjectMeta.Annotations
		filterByRobotValue = annotations[annotationFilterByRobotName]
		filterByRobot      = false
	)
	if filterByRobotValue != "" {
		// A malformed value only logs an error; filtering stays disabled.
		if v, err := strconv.ParseBool(filterByRobotValue); err != nil {
			slog.Error("Value must be boolean",
				slog.String("Filter", annotationFilterByRobotName),
				slog.String("Target", crd.ObjectMeta.Name),
				slog.String("Got", filterByRobotValue))
		} else {
			filterByRobot = v
		}
	}
	versionIx, err := getStorageVersionIndex(crd)
	if err != nil {
		return nil, errors.Wrap(err, "Bad crd passed to newCRSyncer")
	}
	gvr := schema.GroupVersionResource{
		Group:    crd.Spec.Group,
		Version:  crd.Spec.Versions[versionIx].Name,
		Resource: crd.Spec.Names.Plural,
	}
	ns := ""
	if crd.Spec.Scope == crdtypes.NamespaceScoped {
		// TODO(https://github.com/googlecloudrobotics/core/issues/19): allow syncing CRs in other namespaces
		ns = "default"
	}
	s := &crSyncer{
		ctx:        ctx,
		crd:        crd,
		subtree:    annotations[annotationStatusSubtree],
		versionIx:  versionIx,
		upstream:   remote.Resource(gvr).Namespace(ns),
		downstream: local.Resource(gvr).Namespace(ns),
		done:       make(chan struct{}),
	}
	switch src := annotations[annotationSpecSource]; src {
	case "":
		return nil, errIgnoredCRD
	case "cloud":
		s.clusterName = fmt.Sprintf("robot-%s", robotName)
		// Use DefaultControllerRateLimiter for queue with destination robot and ItemFastSlowRateLimiter for queue with destination cloud to improve resilience regarding network errors
		// Upstream destination is robot cluster, downstream destination is cloud cluster
		s.upstreamQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "upstream")
		s.downstreamQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemFastSlowRateLimiter(time.Millisecond*500, time.Second*5, 5), "downstream")
	default:
		return nil, fmt.Errorf("unknown spec source %q", src)
	}
	if filterByRobot {
		if robotName != "" {
			s.labelSelector = labelRobotName + "=" + robotName
		} else {
			// TODO(fabxc): should this return an error instead?
			slog.Warn("request to filter by robot-name, but no robot-name was given to cr-syncer", slog.String("Requester", crd.ObjectMeta.Name))
		}
	}
	s.upstreamInf = s.newInformer(s.upstream)
	s.downstreamInf = s.newInformer(s.downstream)
	return s, nil
}
// newInformer creates a shared informer for the CRD's resource on the given
// client, applying the syncer's optional label selector to both list and
// watch calls. timeout and resyncPeriod are package-level settings defined
// elsewhere in this package.
func (s *crSyncer) newInformer(client dynamic.ResourceInterface) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = s.labelSelector
				options.TimeoutSeconds = timeout
				return client.List(s.ctx, options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = s.labelSelector
				options.TimeoutSeconds = timeout
				return client.Watch(s.ctx, options)
			},
		},
		&unstructured.Unstructured{},
		resyncPeriod,
		// No extra indexers beyond the default store.
		nil,
	)
}
// startInformers launches both informers, blocks until their caches have
// synced, and then wires their events into the work queues. It fails if the
// informers are already running or a cache sync is aborted.
func (s *crSyncer) startInformers() error {
	if s.infDone != nil {
		return fmt.Errorf("informer for %s already started", s.crd.GetName())
	}
	stop := make(chan struct{})
	s.infDone = stop
	go s.upstreamInf.Run(stop)
	go s.downstreamInf.Run(stop)
	if ok := cache.WaitForCacheSync(stop, s.upstreamInf.HasSynced); !ok {
		return fmt.Errorf("stopped while syncing upstream informer for %s", s.crd.GetName())
	}
	if ok := cache.WaitForCacheSync(stop, s.downstreamInf.HasSynced); !ok {
		return fmt.Errorf("stopped while syncing downstream informer for %s", s.crd.GetName())
	}
	// Handlers are registered only after both caches have synced.
	s.setupInformerHandlers(s.upstreamInf, s.upstreamQueue, "upstream")
	s.setupInformerHandlers(s.downstreamInf, s.downstreamQueue, "downstream")
	return nil
}
// stopInformers terminates the informers if they are running and marks them
// stopped by resetting infDone to nil. Safe to call when already stopped.
func (s *crSyncer) stopInformers() {
	if s.infDone == nil {
		return
	}
	close(s.infDone)
	s.infDone = nil
}
// restartInformers stops the running informers, replaces both with fresh
// instances, and starts those. Used to recover from broken watches after
// repeated conflict errors (see processNextWorkItem).
func (s *crSyncer) restartInformers() error {
	s.stopInformers()
	s.upstreamInf = s.newInformer(s.upstream)
	s.downstreamInf = s.newInformer(s.downstream)
	return s.startInformers()
}
// setupInformerHandlers registers add/update/delete handlers on inf that log
// the event and enqueue the object's key on queue. direction is only used
// for logging ("upstream" or "downstream").
func (s *crSyncer) setupInformerHandlers(
	inf cache.SharedIndexInformer,
	queue workqueue.RateLimitingInterface,
	direction string,
) {
	// enqueue builds a handler for one event type that logs and enqueues.
	enqueue := func(action string) func(interface{}) {
		return func(obj interface{}) {
			u := obj.(*unstructured.Unstructured)
			slog.Debug("Got Event",
				slog.String("Event", action),
				slog.String("Direction", direction),
				slog.String("Kind", u.GetKind()),
				slog.String("Name", u.GetName()),
				slog.String("Version", u.GetResourceVersion()))
			if key, ok := keyFunc(obj); ok {
				queue.AddRateLimited(key)
			}
		}
	}
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueue("add"),
		DeleteFunc: enqueue("delete"),
		UpdateFunc: func(_, obj interface{}) {
			enqueue("update")(obj)
		},
	})
}
// processNextWorkItem takes the next key from q and syncs it via syncf,
// recording success/error metrics tagged with qName ("upstream" or
// "downstream"). It returns false only when the queue has been shut down;
// sync failures requeue the key with rate limiting and return true.
func (s *crSyncer) processNextWorkItem(
	ctx context.Context,
	q workqueue.RateLimitingInterface,
	syncf func(string) error,
	qName string,
) bool {
	key, quit := q.Get()
	if quit {
		return false
	}
	defer q.Done(key)
	// Restart informers on too many conflict errors.
	// client-go does not reliably recognize when watch calls are closed by
	// the remote API server. The cr-syncer is able to detect that when
	// updating CRs on the remote API server yields multiple subsequent
	// conflict errors (HTTP 409) like "...please apply your changes to the
	// latest version and try again".
	// This can occur for watchers of single CRDs while others keep working,
	// so it is less resource intensive to restart just the informers of the
	// affected CRDs rather than the whole cr-syncer.
	// Errors are counted in the syncUpstream and syncDownstream functions.
	if s.conflictErrors >= *conflictErrorLimit {
		slog.Info("Restarting informers because of too many conflict errors", slog.String("CRD", s.crd.GetName()))
		err := s.restartInformers()
		if err != nil {
			slog.Warn("Restarting informers failed", slog.String("CRD", s.crd.GetName()))
			// Requeue so the restart is retried when this key comes up again.
			q.AddRateLimited(key)
			return true
		} else {
			s.conflictErrors = 0
		}
	}
	ctx, err := tag.New(ctx, tag.Insert(tagEventSource, qName))
	if err != nil {
		panic(err)
	}
	err = syncf(key.(string))
	stats.Record(ctx, mSyncs.M(1))
	if err == nil {
		q.Forget(key)
		return true
	}
	// Synchronization failed, retry later.
	stats.Record(ctx, mSyncErrors.M(1))
	slog.Warn("Syncing key from queue failed",
		slog.Any("Key", key),
		slog.String("Queue", qName),
		ilog.Err(err))
	q.AddRateLimited(key)
	return true
}
// run starts the informers and one processing goroutine per work queue, then
// blocks until stop() is called. On return the queues are shut down, which
// terminates the processing goroutines, and the informers are stopped.
func (s *crSyncer) run() {
	defer s.upstreamQueue.ShutDown()
	defer s.downstreamQueue.ShutDown()
	slog.Info("Starting syncer", slog.String("CRD", s.crd.GetName()))
	// Start informers that will populate their associated workqueue.
	if err := s.startInformers(); err != nil {
		slog.Warn("Starting informers failed", slog.String("CRD", s.crd.GetName()), ilog.Err(err))
		return
	}
	ctx, err := tag.New(context.Background(), tag.Insert(tagResource, s.crd.Name))
	if err != nil {
		panic(err)
	}
	// Process the upstream and downstream work queues.
	go func() {
		for s.processNextWorkItem(ctx, s.upstreamQueue, s.syncUpstream, "upstream") {
		}
	}()
	go func() {
		for s.processNextWorkItem(ctx, s.downstreamQueue, s.syncDownstream, "downstream") {
		}
	}()
	<-s.done
	// Use stopInformers() rather than closing infDone directly: it also
	// resets infDone to nil, so a later stopInformers()/restartInformers()
	// cannot close the same channel twice (which would panic).
	s.stopInformers()
}
// stop signals run() to terminate by closing the done channel. It must be
// called at most once per syncer (a second close would panic).
func (s *crSyncer) stop() {
	slog.Info("Stopping syncer", slog.String("CRD", s.crd.GetName()))
	close(s.done)
}
// syncDownstream reconciles state after receiving change events from the
// downstream cluster. It synchronizes the status from the downstream to the
// upstream cluster, and deletes orphaned downstream resources. key has the
// informer-store form of an optional namespace prefix followed by the name.
func (s *crSyncer) syncDownstream(key string) error {
	v := s.crd.Spec.Versions[s.versionIx]
	statusIsSubresource := v.Subresources != nil && v.Subresources.Status != nil
	// Get the downstream status (src) and upstream spec (dst).
	srcObj, srcExists, err := s.downstreamInf.GetIndexer().GetByKey(key)
	if err != nil {
		return fmt.Errorf("failed to retrieve resource for key %s: %s", key, err)
	}
	if !srcExists {
		// The downstream resource has been deleted: possibly because
		// the upstream resource was deleted and recreated. Add this to
		// the upstream queue so that syncUpstream() can check if it needs
		// to recreate the downstream resource.
		s.upstreamQueue.Add(key)
		return nil
	}
	// Deep-copy before mutating: the indexer owns the stored object.
	src := srcObj.(*unstructured.Unstructured).DeepCopy()
	removeFinalizer(s.ctx, s.downstream, src, s.clusterName)
	dstObj, dstExists, err := s.upstreamInf.GetIndexer().GetByKey(key)
	if err != nil {
		return fmt.Errorf("failed to retrieve resource for key %s: %s", key, err)
	}
	// If the upstream resource no longer exists, delete the downstream
	// resource. Normally, this occurs when syncUpstream() handles the
	// upstream deletion, but if the resource was deleted when the robot
	// was offline, upstream doesn't know about the old resource and we'll
	// hit this condition.
	if !dstExists {
		if src.GetDeletionTimestamp() != nil {
			return nil // Already being deleted.
		}
		if err := s.downstream.Delete(s.ctx, src.GetName(), metav1.DeleteOptions{}); err != nil {
			if isNotFoundError(err) {
				return nil
			}
			return fmt.Errorf("delete resource: %s", err)
		}
		return nil
	}
	dst := dstObj.(*unstructured.Unstructured).DeepCopy()
	// Copy full status or subtree from src to dst.
	if s.subtree == "" {
		copyStatus(dst, src)
	} else if src.Object["status"] != nil {
		srcStatus, ok := src.Object["status"].(map[string]interface{})
		if !ok {
			return fmt.Errorf("Expected status of %s in downstream cluster to be a dict", src.GetName())
		}
		if dst.Object["status"] == nil {
			dst.Object["status"] = make(map[string]interface{})
		}
		dstStatus, ok := dst.Object["status"].(map[string]interface{})
		if !ok {
			return fmt.Errorf("Expected status of %s in upstream cluster to be a dict", src.GetName())
		}
		// Only the configured subtree is propagated; if it is absent in
		// src it is removed from dst.
		if srcStatus[s.subtree] != nil {
			dstStatus[s.subtree] = srcStatus[s.subtree]
		} else {
			delete(dstStatus, s.subtree)
		}
	}
	// Record which downstream resource version this status came from.
	setAnnotation(dst, annotationResourceVersion, src.GetResourceVersion())
	// We need to make a dedicated UpdateStatus call if the status is defined
	// as an explicit subresource of the CRD.
	if statusIsSubresource {
		// Status must not be null/nil.
		if dst.Object["status"] == nil {
			dst.Object["status"] = struct{}{}
		}
		updated, err := s.upstream.UpdateStatus(s.ctx, dst, metav1.UpdateOptions{})
		if err != nil {
			// Count subsequent conflict errors.
			if k8serrors.IsConflict(err) && s.clusterName != cloudClusterName {
				s.conflictErrors += 1
			}
			return newAPIErrorf(dst, "update status failed: %s", err)
		}
		dst = updated
	} else {
		updated, err := s.upstream.Update(s.ctx, dst, metav1.UpdateOptions{})
		if err != nil {
			// Count subsequent conflict errors.
			if k8serrors.IsConflict(err) && s.clusterName != cloudClusterName {
				s.conflictErrors += 1
			}
			return newAPIErrorf(dst, "update failed: %s", err)
		}
		dst = updated
	}
	// Reset error count after a successful update.
	if s.clusterName != cloudClusterName {
		s.conflictErrors = 0
	}
	slog.Debug("Copied status to upstream",
		slog.String("Kind", src.GetKind()),
		slog.String("Name", src.GetName()),
		slog.Any("Source version", src.GetResourceVersion()),
		slog.Any("Destination version", dst.GetResourceVersion()))
	return nil
}
// syncUpstream reconciles the state after receiving a change event from upstream.
// It synchronizes the spec changes from upstream to the downstream cluster and propagates
// deletions. key has the informer-store form of an optional namespace prefix
// followed by the name.
func (s *crSyncer) syncUpstream(key string) error {
	// Get the upstream spec (src) and downstream status (dst).
	src := &unstructured.Unstructured{make(map[string]interface{})}
	dst := &unstructured.Unstructured{make(map[string]interface{})}
	srcObj, srcExists, err := s.upstreamInf.GetIndexer().GetByKey(key)
	if err != nil {
		return fmt.Errorf("failed to retrieve resource for key %s: %s", key, err)
	}
	if srcExists {
		// Deep-copy before mutating: the indexer owns the stored object.
		src = srcObj.(*unstructured.Unstructured).DeepCopy()
		removeFinalizer(s.ctx, s.upstream, src, s.clusterName)
	}
	dstObj, dstExists, err := s.downstreamInf.GetIndexer().GetByKey(key)
	if err != nil {
		return fmt.Errorf("failed to retrieve resource for key %s: %s", key, err)
	}
	if dstExists {
		dst = dstObj.(*unstructured.Unstructured).DeepCopy()
	}
	// Check if the downstream resource (dst) should be created, updated,
	// or deleted. If we don't need to create/update dst, return early.
	var createOrUpdate func(*unstructured.Unstructured) (*unstructured.Unstructured, error)
	switch {
	case !srcExists && !dstExists:
		// Both deleted, nothing to do.
		return nil
	case srcExists && !dstExists:
		// Create object and set base fields.
		createOrUpdate = func(o *unstructured.Unstructured) (*unstructured.Unstructured, error) {
			o.SetGroupVersionKind(src.GroupVersionKind())
			o.SetNamespace(src.GetNamespace())
			o.SetName(src.GetName())
			// Copy upstream status on initial creation.
			o.Object["status"] = src.Object["status"]
			return s.downstream.Create(s.ctx, o, metav1.CreateOptions{})
		}
	case srcExists && dstExists:
		// Update dst.
		createOrUpdate = func(o *unstructured.Unstructured) (*unstructured.Unstructured, error) {
			return s.downstream.Update(s.ctx, o, metav1.UpdateOptions{})
		}
	case !srcExists && dstExists:
		// Delete dst.
		if err := s.downstream.Delete(s.ctx, dst.GetName(), metav1.DeleteOptions{}); err != nil {
			if isNotFoundError(err) {
				return nil
			}
			return newAPIErrorf(dst, "downstream delete failed: %s", err)
		}
		return nil
	default:
		// Unreachable: the four cases above are exhaustive.
		slog.Error("unhandled condition",
			slog.Bool("srcExists", srcExists),
			slog.Bool("dstExists", dstExists))
		os.Exit(1)
		return nil
	}
	// Before creating/updating, check if deletion is in progress. This
	// is checked separately to src/dstExists for readability (hopefully).
	if src.GetDeletionTimestamp() != nil {
		if err := s.downstream.Delete(s.ctx, src.GetName(), metav1.DeleteOptions{}); err != nil {
			if isNotFoundError(err) {
				return nil
			}
			return newAPIErrorf(dst, "downstream delete failed: %s", err)
		}
		return nil
	}
	// Create/update dst with the labels+annotations+spec of src.
	dst.SetLabels(src.GetLabels())
	dst.SetAnnotations(src.GetAnnotations())
	dst.Object["spec"] = src.Object["spec"]
	// The remote-resource-version annotation is removed from dst to
	// prevent an infinite loop, because changing the annotation would
	// change the resource version.
	deleteAnnotation(dst, annotationResourceVersion)
	if _, err = createOrUpdate(dst); err != nil {
		// Count subsequent conflict errors.
		if k8serrors.IsConflict(err) && s.clusterName == cloudClusterName {
			s.conflictErrors += 1
		}
		return newAPIErrorf(dst, "failed to create or update downstream: %s", err)
	}
	// Reset error count after a successful write.
	if s.clusterName == cloudClusterName {
		s.conflictErrors = 0
	}
	return nil
}
// isNotFoundError reports whether err indicates that a Kubernetes resource
// does not exist.
func isNotFoundError(err error) bool {
	// k8serrors.IsNotFound also recognizes errors that implement APIStatus
	// without being *StatusError, which the direct type assertion below
	// misses. The code-based check is kept for 404s carrying an
	// unrecognized reason.
	if k8serrors.IsNotFound(err) {
		return true
	}
	status, ok := err.(*k8serrors.StatusError)
	return ok && status.ErrStatus.Code == http.StatusNotFound
}
// apiError decorates an error message from a Kubernetes API call with the
// kind, namespace, name, and resource version of the object the call was for.
type apiError struct {
	o   *unstructured.Unstructured // Object the failing call operated on.
	msg string                     // Pre-formatted error description.
}
// Error implements the error interface, prefixing the message with the
// object's kind, namespace/name, and resource version.
func (e apiError) Error() string {
	return fmt.Sprintf("%s %s/%s @ %s: %s", e.o.GetKind(), e.o.GetNamespace(), e.o.GetName(), e.o.GetResourceVersion(), e.msg)
}
// newAPIErrorf builds an apiError for object o with a printf-style message.
func newAPIErrorf(o *unstructured.Unstructured, format string, args ...interface{}) apiError {
	msg := fmt.Sprintf(format, args...)
	return apiError{o: o, msg: msg}
}
// keyFunc extracts a key of the form [<namespace>/]<name> from a resource,
// which is used to access the informer's store and index. The second return
// value is false if no key could be derived.
func keyFunc(obj interface{}) (string, bool) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err == nil {
		return key, true
	}
	slog.Warn("deriving key failed", ilog.Err(err))
	return key, false
}
// setAnnotation sets annotation key to value on o, creating the annotation
// map if o has none yet.
func setAnnotation(o *unstructured.Unstructured, key, value string) {
	a := o.GetAnnotations()
	if a == nil {
		a = map[string]string{key: value}
	} else {
		a[key] = value
	}
	o.SetAnnotations(a)
}
// deleteAnnotation removes annotation key from o. If no annotations remain,
// the annotation map is cleared entirely (set to nil).
func deleteAnnotation(o *unstructured.Unstructured, key string) {
	annotations := o.GetAnnotations()
	// delete on a nil map is a no-op in Go, so no nil check is needed here.
	delete(annotations, key)
	if len(annotations) == 0 {
		o.SetAnnotations(nil)
		return
	}
	o.SetAnnotations(annotations)
}
// copyStatus copies the status subtree from src to dst (deep-copied so later
// mutations of src do not leak into dst), translating observedGeneration.
func copyStatus(dst, src *unstructured.Unstructured) {
	dst.Object["status"] = src.DeepCopy().Object["status"]
	// If this CR uses the observedGeneration convention, ensure that we
	// preserve the **equality** between generation and observedGeneration,
	// since the generations themselves will differ between local and remote.
	srcStatus, ok := src.Object["status"].(map[string]interface{})
	if !ok {
		// Status is not a dict => no observedGeneration.
		return
	}
	dstStatus := dst.Object["status"].(map[string]interface{})
	// Integer JSON values in unstructured objects are int64, so this type
	// assertion matches a numeric observedGeneration.
	if srcOG, ok := srcStatus["observedGeneration"].(int64); ok {
		if src.GetGeneration() == srcOG {
			dstStatus["observedGeneration"] = dst.GetGeneration()
		} else {
			// The controller of this CR has not observed the latest generation.
			dstStatus["observedGeneration"] = 0
		}
	}
}
================================================
FILE: src/go/cmd/cr-syncer/syncer_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
crdtypes "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sfake "k8s.io/client-go/dynamic/fake"
k8stest "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
// filterReadActions drops read-only actions (watch/list/get) so tests can
// verify only the write actions recorded by a fake client.
func filterReadActions(actions []k8stest.Action) []k8stest.Action {
	var writes []k8stest.Action
	for _, action := range actions {
		switch action.GetVerb() {
		case "watch", "list", "get":
			// Read-only; skip.
		default:
			writes = append(writes, action)
		}
	}
	return writes
}
// fixture bundles fake dynamic clients for a local (downstream) and remote
// (upstream) cluster together with the objects to seed them with and the
// write actions a test expects to observe against each client.
type fixture struct {
	*testing.T

	local  *k8sfake.FakeDynamicClient
	remote *k8sfake.FakeDynamicClient

	// Starting state the respective client will report.
	remoteObjects []runtime.Object
	localObjects  []runtime.Object
	// Actions we want to see called against the respective client.
	remoteActions []k8stest.Action
	localActions  []k8stest.Action
}
// newFixture returns an empty fixture bound to t; the fake clients are
// created later by (*fixture).newCRSyncer.
func newFixture(t *testing.T) *fixture {
	return &fixture{T: t}
}
// newCRSyncer builds fake local and remote dynamic clients seeded with the
// fixture's objects, constructs a crSyncer for crd, and returns it together
// with the CRD's GroupVersionResource for composing expected actions.
func (f *fixture) newCRSyncer(crd crdtypes.CustomResourceDefinition, robotName string) (*crSyncer, schema.GroupVersionResource) {
	gvk := schema.GroupVersionKind{
		Group:   crd.Spec.Group,
		Version: crd.Spec.Versions[0].Name,
		Kind:    crd.Spec.Names.Kind,
	}
	// The same GVR is needed for both fake clients and the return value;
	// compute it once instead of repeating the literal three times.
	gvr := schema.GroupVersionResource{
		Group:    crd.Spec.Group,
		Version:  crd.Spec.Versions[0].Name,
		Resource: crd.Spec.Names.Plural,
	}
	listKinds := map[schema.GroupVersionResource]string{
		gvr: fmt.Sprintf("%sList", gvk.Kind),
	}
	s := runtime.NewScheme()
	s.AddKnownTypeWithName(gvk, &unstructured.Unstructured{})
	f.local = k8sfake.NewSimpleDynamicClientWithCustomListKinds(s, listKinds, f.localObjects...)
	f.remote = k8sfake.NewSimpleDynamicClientWithCustomListKinds(s, listKinds, f.remoteObjects...)
	crs, err := newCRSyncer(context.Background(), crd, f.local, f.remote, robotName)
	if err != nil {
		f.Fatal(err)
	}
	return crs, gvr
}
// addLocalObjects seeds the local (downstream) cluster's starting state.
func (f *fixture) addLocalObjects(objs ...runtime.Object) {
	for _, o := range objs {
		f.localObjects = append(f.localObjects, o)
	}
}
// addRemoteObjects seeds the remote (upstream) cluster's starting state.
func (f *fixture) addRemoteObjects(objs ...runtime.Object) {
	for _, o := range objs {
		f.remoteObjects = append(f.remoteObjects, o)
	}
}
// expectLocalActions records write actions expected against the local client.
func (f *fixture) expectLocalActions(as ...k8stest.Action) {
	for _, a := range as {
		f.localActions = append(f.localActions, a)
	}
}
// expectRemoteActions records write actions expected against the remote client.
func (f *fixture) expectRemoteActions(as ...k8stest.Action) {
	for _, a := range as {
		f.remoteActions = append(f.remoteActions, a)
	}
}
// verifyWriteActions compares the write (non-read) actions recorded by the
// fake clients against the expected actions and reports any difference.
func (f *fixture) verifyWriteActions() {
	var (
		localWrites  = filterReadActions(f.local.Actions())
		remoteWrites = filterReadActions(f.remote.Actions())
	)
	// cmp.Diff(x, y) prefixes x with "-" and y with "+", so the expected
	// (want) actions must be the first argument for the "(-want +got)"
	// label in the error message to be accurate.
	if diff := cmp.Diff(f.localActions, localWrites); diff != "" {
		f.Errorf("local writes did not match (-want +got):\n%s", diff)
	}
	if diff := cmp.Diff(f.remoteActions, remoteWrites); diff != "" {
		f.Errorf("remote writes did not match (-want +got):\n%s", diff)
	}
}
// testCRD returns a basic resource definition we use for custom testing.
// It may be altered for specific tests.
// By default it has set the spec-source annotation to "cloud".
func testCRD(scope crdtypes.ResourceScope) crdtypes.CustomResourceDefinition {
	meta := metav1.ObjectMeta{
		Name:   "goals.crds.example.com",
		Labels: map[string]string{},
		Annotations: map[string]string{
			annotationSpecSource: "cloud",
		},
	}
	spec := crdtypes.CustomResourceDefinitionSpec{
		Group: "crds.example.com",
		Names: crdtypes.CustomResourceDefinitionNames{
			Kind:     "Goal",
			Singular: "goal",
			Plural:   "goals",
		},
		Scope: scope,
		Versions: []crdtypes.CustomResourceDefinitionVersion{{
			Name:    "v1",
			Served:  true,
			Storage: true,
		}},
	}
	return crdtypes.CustomResourceDefinition{ObjectMeta: meta, Spec: spec}
}
// newTestCR creates a new namespaced custom resource that matches the
// definition returned by testCRD(crdtypes.NamespaceScoped).
func newTestCR(name string, spec, status interface{}) *unstructured.Unstructured {
	cr := &unstructured.Unstructured{Object: map[string]interface{}{
		"spec":   spec,
		"status": status,
	}}
	cr.SetKind("Goal")
	cr.SetAPIVersion("crds.example.com/v1")
	cr.SetNamespace(metav1.NamespaceDefault)
	cr.SetName(name)
	return cr
}
// newClusterScopedTestCR creates a new cluster-scoped custom resource that
// matches the definition returned by testCRD(crdtypes.ClusterScoped).
func newClusterScopedTestCR(name string, spec, status interface{}) *unstructured.Unstructured {
	cr := &unstructured.Unstructured{Object: map[string]interface{}{
		"spec":   spec,
		"status": status,
	}}
	cr.SetKind("Goal")
	cr.SetAPIVersion("crds.example.com/v1")
	cr.SetName(name)
	return cr
}
func TestSyncUpstream_createSpec(t *testing.T) {
crd := testCRD(crdtypes.NamespaceScoped)
f := newFixture(t)
// When an upstream resource is seen for the first time, it should be
// created in the downstream cluster including its current status.
tcrRemote := newTestCR("resource1", "spec1", "status1")
f.addRemoteObjects(tcrRemote)
crs, gvr := f.newCRSyncer(crd, "cluster1")
defer crs.stop()
crs.startInformers()
if err := crs.syncUpstream("default/resource1"); err != nil {
t.Fatal(err)
}
var (
tcrLocalNew = newTestCR("resource1", "spec1", "status1")
)
f.expectLocalActions(k8stest.NewCreateAction(gvr, "default", tcrLocalNew))
f.verifyWriteActions()
}
func TestSyncClusterScopedCRUpstream_createSpec(t *testing.T) {
crd := testCRD(crdtypes.ClusterScoped)
f := newFixture(t)
// When an upstream resource is seen for the first time, it should be
// created in the downstream cluster including its current status.
tcrRemote := newClusterScopedTestCR("resource1", "spec1", "status1")
f.addRemoteObjects(tcrRemote)
crs, gvr := f.newCRSyncer(crd, "cluster1")
defer crs.stop()
crs.startInformers()
if err := crs.syncUpstream("resource1"); err != nil {
t.Fatal(err)
}
var (
tcrLocalNew = newClusterScopedTestCR("resource1", "spec1", "status1")
)
f.expectLocalActions(k8stest.NewCreateAction(gvr, "", tcrLocalNew))
f.verifyWriteActions()
}
func TestSyncUpstream_updateSpec(t *testing.T) {
crd := testCRD(crdtypes.NamespaceScoped)
f := newFixture(t)
// On upstream update, the spec in the downstream cluster should be adjusted.
var (
tcrLocal = newTestCR("resource1", "spec1", "status2")
tcrRemote = newTestCR("resource1", "spec2", "status1")
)
f.addLocalObjects(tcrLocal)
f.addRemoteObjects(tcrRemote)
crs, gvr := f.newCRSyncer(crd, "cluster1")
defer crs.stop()
crs.startInformers()
if err := crs.syncUpstream("default/resource1"); err != nil {
t.Fatal(err)
}
var (
tcrLocalNew = newTestCR("resource1", "spec2", "status2")
)
f.expectLocalActions(k8stest.NewUpdateAction(gvr, "default", tcrLocalNew))
f.verifyWriteActions()
}
func TestSyncUpstream_propagateDelete(t *testing.T) {
crd := testCRD(crdtypes.NamespaceScoped)
f := newFixture(t)
var (
now = metav1.Now()
tcrLocal = newTestCR("resource1", "spec1", "status1")
tcrRemote = newTestCR("resource1", "spec1", "status1")
)
tcrRemote.SetDeletionTimestamp(&now)
f.addLocalObjects(tcrLocal)
f.addRemoteObjects(tcrRemote)
crs, gvr := f.newCRSyncer(crd, "cluster1")
defer crs.stop()
crs.startInformers()
if err := crs.syncUpstream("default/resource1"); err != nil {
t.Fatal(err)
}
f.expectLocalActions(
k8stest.NewDeleteAction(gvr, "default", "resource1"),
)
f.verifyWriteActions()
}
func TestSyncDownstream_deleteOrphan(t *testing.T) {
crd := testCRD(crdtypes.NamespaceScoped)
f := newFixture(t)
// We have a local resource that has no matching resource in the upstream cluster.
// Trying to sync it again should delete the local copy.
tcrLocal := newTestCR("resource1", "spec1", "status1")
f.addLocalObjects(tcrLocal)
crs, gvr := f.newCRSyncer(crd, "cluster1")
defer crs.stop()
crs.startInformers()
if err := crs.syncDownstream("default/resource1"); err != nil {
t.Fatal(err)
}
f.expectLocalActions(
k8stest.NewDeleteAction(gvr, "default", "resource1"),
)
f.verifyWriteActions()
}
func TestSyncDownstream_statusFull(t *testing.T) {
crd := testCRD(crdtypes.NamespaceScoped)
f := newFixture(t)
var (
tcrLocal = newTestCR("resource1", "spec1", "status2")
tcrRemote = newTestCR("resource1", "spec1", "status1")
)
tcrLocal.SetResourceVersion("123")
f.addLocalObjects(tcrLocal)
f.addRemoteObjects(tcrRemote)
crs, gvr := f.newCRSyncer(crd, "")
defer crs.stop()
crs.startInformers()
if err := crs.syncDownstream("default/resource1"); err != nil {
t.Fatal(err)
}
tcrRemoteNew := newTestCR("resource1", "spec1", "status2")
tcrRemoteNew.SetAnnotations(map[string]string{
annotationResourceVersion: "123",
})
f.expectRemoteActions(k8stest.NewUpdateAction(gvr, "default", tcrRemoteNew))
f.verifyWriteActions()
}
// TestSyncDownstream_statusWithObservedGeneration verifies the translation of
// status.observedGeneration between the two clusters, whose metadata.generation
// counters are independent.
func TestSyncDownstream_statusWithObservedGeneration(t *testing.T) {
	crd := testCRD(crdtypes.NamespaceScoped)
	f := newFixture(t)
	// If (and only if) generation==observedGeneration for the local resource,
	// the cr-syncer should adjust observedGeneration to match for the remote
	// resource:
	// - local: generation = 3, observedGeneration = 3
	// - remote (before test): generation = 2, observedGeneration = 1
	// - remote (after test): generation = 2, observedGeneration = 2
	tcrLocal := newTestCR("resource1", "spec1", map[string]any{"observedGeneration": int64(3)})
	tcrRemote := newTestCR("resource1", "spec1", map[string]any{"observedGeneration": int64(1)})
	tcrLocal.SetResourceVersion("123")
	tcrLocal.SetGeneration(3)
	tcrRemote.SetGeneration(2)
	f.addLocalObjects(tcrLocal)
	f.addRemoteObjects(tcrRemote)
	crs, gvr := f.newCRSyncer(crd, "")
	defer crs.stop()
	crs.startInformers()
	if err := crs.syncDownstream("default/resource1"); err != nil {
		t.Fatal(err)
	}
	// Expect that tcrRemoteNew's observedGeneration is changed to match its
	// generation.
	tcrRemoteNew := newTestCR("resource1", "spec1", map[string]any{"observedGeneration": int64(2)})
	tcrRemoteNew.SetGeneration(2)
	tcrRemoteNew.SetAnnotations(map[string]string{
		annotationResourceVersion: "123",
	})
	f.expectRemoteActions(k8stest.NewUpdateAction(gvr, "default", tcrRemoteNew))
	f.verifyWriteActions()
}
// TestSyncDownstream_statusSubtree verifies that, when a status subtree is
// configured, syncDownstream only overwrites that subtree ("robot") on the
// remote copy and leaves the remainder of the status ("cloud") untouched.
func TestSyncDownstream_statusSubtree(t *testing.T) {
	crd := testCRD(crdtypes.NamespaceScoped)
	f := newFixture(t)
	local := newTestCR("resource1", "spec1", map[string]any{
		"cloud": "cloud_1",
		"robot": "robot_2",
	})
	remote := newTestCR("resource1", "spec1", map[string]any{
		"cloud": "cloud_2",
		"robot": "robot_1",
	})
	local.SetResourceVersion("123")
	f.addLocalObjects(local)
	f.addRemoteObjects(remote)
	crs, gvr := f.newCRSyncer(crd, "")
	defer crs.stop()
	crs.subtree = "robot"
	crs.startInformers()
	if err := crs.syncDownstream("default/resource1"); err != nil {
		t.Fatal(err)
	}
	// Only the "robot" subtree should be taken from the local copy; "cloud"
	// keeps the remote value.
	want := newTestCR("resource1", "spec1", map[string]any{
		"cloud": "cloud_2",
		"robot": "robot_2",
	})
	want.SetAnnotations(map[string]string{
		annotationResourceVersion: "123",
	})
	f.expectRemoteActions(k8stest.NewUpdateAction(gvr, "default", want))
	f.verifyWriteActions()
}
// TestSyncDownstream_downstreamNotFound verifies that a missing downstream
// (local) resource causes syncDownstream to requeue the upstream resource so
// that syncUpstream can recreate the local copy.
func TestSyncDownstream_downstreamNotFound(t *testing.T) {
	crd := testCRD(crdtypes.NamespaceScoped)
	f := newFixture(t)
	// If the downstream resource is not present when synced, the upstream
	// resource should be added to the upstream queue, so that syncUpstream
	// can recreate the downstream resource. This tests the case where the
	// upstream resource was deleted and immediately recreated.
	var (
		tcrRemote = newTestCR("resource1", "spec1", "status1")
	)
	f.addRemoteObjects(tcrRemote)
	crs, _ := f.newCRSyncer(crd, "cluster1")
	defer crs.stop()
	crs.startInformers()
	// startInformers adds the initial state to the upstream queue. Ignore
	// it, so that we can check that the same resource is requeued.
	upstreamChannel := channelFromQueue(t, crs.upstreamQueue, crs.upstreamInf)
	select {
	case <-upstreamChannel:
		// Ignore.
	case <-time.After(5 * time.Second):
		t.Errorf("upstream resource was not queued by informer; want %v", tcrRemote)
	}
	if err := crs.syncDownstream("default/resource1"); err != nil {
		t.Fatal(err)
	}
	// syncDownstream should have requeued the upstream resource.
	select {
	case got := <-upstreamChannel:
		if !reflect.DeepEqual(got, tcrRemote) {
			t.Errorf("upstream queue got %v; want %v", got, tcrRemote)
		}
	case <-time.After(5 * time.Second):
		t.Errorf("upstream resource was not requeued to %p; want %v", crs.upstreamQueue, tcrRemote)
	}
	// We don't need to call syncUpstream here, as this is tested by
	// TestSyncUpstream_createSpec.
}
// TestCRSyncer_populateWorkqueue verifies that starting the informers enqueues
// the preexisting upstream resource on the upstream workqueue.
//
// It uses the channelFromQueue helper (as the other tests in this file do)
// instead of duplicating the queue-draining goroutine inline; the previous
// inline version also never called queue.Done() on the retrieved key.
func TestCRSyncer_populateWorkqueue(t *testing.T) {
	crd := testCRD(crdtypes.NamespaceScoped)
	f := newFixture(t)
	cr1 := newTestCR("cr1", "spec1", "status1")
	f.addRemoteObjects(cr1)
	crs, _ := f.newCRSyncer(crd, "")
	defer crs.stop()
	crs.startInformers()
	// Workqueue exposes no interface to select{} over, so channelFromQueue
	// calls Get() in a goroutine to surface deadlocks properly.
	channel := channelFromQueue(t, crs.upstreamQueue, crs.upstreamInf)
	select {
	case got := <-channel:
		if !reflect.DeepEqual(got, cr1) {
			t.Errorf("unexpected object; want %v; got %v", cr1, got)
		}
	case <-time.After(5 * time.Second):
		t.Errorf("Received no watch event; wanted %v", cr1)
	}
}
// TestCRSyncer_populateWorkqueueWithFilter verifies that, with the
// filter-by-robot-name annotation set on the CRD, only CRs labeled with this
// robot's name ("robot-1") are enqueued by the informer.
func TestCRSyncer_populateWorkqueueWithFilter(t *testing.T) {
	crd := testCRD(crdtypes.NamespaceScoped)
	crd.ObjectMeta.Annotations[annotationFilterByRobotName] = "true"
	f := newFixture(t)
	// Create three CRs of which only one matches the robot the CR syncer
	// is running on.
	crCorrectRobot := newTestCR("cr1", "spec1", "status1")
	crWrongRobot := newTestCR("cr2", "spec2", "status2")
	crNoRobot := newTestCR("cr3", "spec3", "status3")
	crCorrectRobot.SetLabels(map[string]string{labelRobotName: "robot-1"})
	crWrongRobot.SetLabels(map[string]string{labelRobotName: "robot-2"})
	f.addRemoteObjects(crCorrectRobot, crWrongRobot, crNoRobot)
	crs, _ := f.newCRSyncer(crd, "robot-1")
	defer crs.stop()
	crs.startInformers()
	channel := channelFromQueue(t, crs.upstreamQueue, crs.upstreamInf)
	// The matching CR must arrive first (and, per the check below, alone).
	select {
	case got := <-channel:
		if !reflect.DeepEqual(got, crCorrectRobot) {
			t.Errorf("unexpected object; want %v; got %v", crCorrectRobot, got)
		}
	case <-time.After(5 * time.Second):
		t.Errorf("Received no watch event; wanted %v", crCorrectRobot)
	}
	// No other items should come through.
	select {
	case item := <-channel:
		t.Errorf("Unexpected update: %v", item)
	case <-time.After(3 * time.Second):
	}
}
// channelFromQueue adapts a workqueue into a channel of decoded items by
// looking up each queue key in the given informer's indexer. Unexpected
// conditions (queue shutdown, lookup failure, missing item) are reported via
// t.Errorf. The returned channel is closed when the queue shuts down.
func channelFromQueue(t *testing.T, queue workqueue.Interface, inf cache.SharedIndexInformer) <-chan *unstructured.Unstructured {
	ch := make(chan *unstructured.Unstructured, 1)
	go func() {
		defer close(ch)
		for {
			key, quit := queue.Get()
			if quit {
				t.Errorf("unexpected quit")
				return
			}
			item, exists, err := inf.GetIndexer().GetByKey(key.(string))
			if err != nil {
				t.Errorf("unexpected lookup error for key %s: %s", key, err)
			}
			if !exists {
				t.Errorf("item for key %s does not exist", key)
			} else {
				ch <- item.(*unstructured.Unstructured)
			}
			// Mark the key as done in all cases. The previous version skipped
			// Done() when the lookup failed or the item was missing, leaving
			// the key permanently marked as in-progress in the workqueue.
			queue.Done(key)
		}
	}()
	return ch
}
================================================
FILE: src/go/cmd/cr-syncer-auth-webhook/BUILD.bazel
================================================
# Build rules for the cr-syncer-auth-webhook: a Go binary packaged into a
# distroless container image, plus its unit tests.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])

# Library containing the webhook's HTTP handlers (main.go) and the incoming
# request URL parsing/validation (request.go).
go_library(
    name = "go_default_library",
    srcs = [
        "main.go",
        "request.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/cr-syncer-auth-webhook",
    visibility = ["//visibility:private"],
    deps = [
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@org_golang_x_oauth2//jws:go_default_library",
    ],
)

# The webhook server binary.
go_binary(
    name = "cr-syncer-auth-webhook-app",
    embed = [":go_default_library"],
)

# Tarball layer containing the binary, consumed by the image rule below.
pkg_tar(
    name = "cr-syncer-auth-webhook-layer",
    srcs = [":cr-syncer-auth-webhook-app"],
    extension = "tar.gz",
)

# Distroless container image that runs the webhook.
oci_image(
    name = "cr-syncer-auth-webhook-image",
    base = "@distroless_base",
    entrypoint = ["/cr-syncer-auth-webhook-app"],
    tars = [":cr-syncer-auth-webhook-layer"],
)

# Unit tests for the request parsing (request_test.go).
go_test(
    name = "go_default_test",
    srcs = ["request_test.go"],
    embed = [":go_default_library"],
    deps = ["@com_github_google_go_cmp//cmp:go_default_library"],
)
================================================
FILE: src/go/cmd/cr-syncer-auth-webhook/main.go
================================================
// Copyright 2025 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The cr-syncer-auth-webhook verifies that requests from the cr-syncer are
// limited to the robot named in the credentials.
package main
import (
"context"
"flag"
"fmt"
"io"
"log/slog"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/pkg/errors"
"golang.org/x/oauth2/jws"
"github.com/googlecloudrobotics/ilog"
)
// Command-line flags.
var (
	port = flag.Int("port", 8080,
		"Listening port for HTTP requests")
	acceptLegacyCredentials = flag.Bool("accept-legacy-service-account-credentials", false,
		"Whether to accept legacy GCP service account credentials")
	tokenVendor = flag.String("token-vendor", "http://token-vendor.app-token-vendor.svc.cluster.local",
		"Hostname of the token-vendor service")
	logLevel = flag.Int("log-level", int(slog.LevelInfo),
		"the log message level required to be logged")
)

const (
	// verifyJWTEndpoint is the token-vendor API path used to verify a JWT's
	// signature against the robot's registered public key.
	verifyJWTEndpoint = "/apis/core.token-vendor/v1/jwt.verify"
	// legacyTokenPrefix is the prefix of GCP OAuth2 access tokens; such
	// tokens are not JWTs and are short-circuited in verifyJWT.
	legacyTokenPrefix = "ya29."
)
// handlers bundles the state shared by the webhook's HTTP handler methods:
// the HTTP client used for requests to the token-vendor.
type handlers struct {
	client *http.Client
}

// newHandlers constructs a handlers value with a default HTTP client.
func newHandlers() handlers {
	h := handlers{}
	h.client = &http.Client{}
	return h
}
// health is a trivial liveness endpoint that always responds 200 OK.
func (h *handlers) health(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}
// verifyJWT delegates to the token-vendor to verify the signature of the JWT
// matches the public key of the robot. It returns nil only if the
// token-vendor accepted the token.
func (h *handlers) verifyJWT(encodedJWT string) error {
	if strings.HasPrefix(encodedJWT, legacyTokenPrefix) {
		// We can avoid the unnecessary request when the client is using a GCP
		// access token.
		return fmt.Errorf("legacy token format")
	}
	req, err := http.NewRequest("GET", *tokenVendor+verifyJWTEndpoint, nil)
	if err != nil {
		return fmt.Errorf("create request: %w", err)
	}
	req.Header.Add("Authorization", "Bearer "+encodedJWT)
	resp, err := h.client.Do(req)
	if err != nil {
		return fmt.Errorf("do request: %w", err)
	}
	// Discard body so connection can be reused.
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		return nil
	case http.StatusForbidden:
		return fmt.Errorf("invalid JWT")
	default:
		slog.Warn("unexpected status code from /jwt.verify", slog.Int("Status", resp.StatusCode))
		return fmt.Errorf("unexpected status code")
	}
}
// resourceIsFiltered reports whether CRs of the given group/kind are synced
// per-robot and must therefore pass the robot-name checks in validateRequest.
// Only robottypes are treated as unfiltered here.
func (h *handlers) resourceIsFiltered(groupKind string) bool {
	// TODO: limit to CRDs with filter-by-robot-name label in case someone adds
	// new unfiltered resources in future.
	return groupKind != "registry.cloudrobotics.com/robottypes"
}
// validateRequest checks that the request is expected for the cr-syncer and
// only accesses allowed resources. robotName is the identity taken from the
// verified JWT; the target resource is taken from the X-Original-Url header.
func (h *handlers) validateRequest(r *http.Request, robotName string) error {
	rawURL := r.Header.Get("X-Original-Url")
	req, err := parseURL(rawURL)
	if err != nil {
		slog.Error("unexpected value of X-Original-Url", slog.String("URL", rawURL), ilog.Err(err))
		return err
	}
	if !h.resourceIsFiltered(req.GroupKind) {
		// Unfiltered resources (eg robottypes) are always allowed.
		//
		// For additional defense-in-depth, we could check if the CRD has
		// annotations for the cr-syncer. However, the RBAC policy in
		// cr-syncer-policy.yaml already limits the client to syncable resources.
		return nil
	}
	// TODO: check against label of upstream resource instead of assuming that
	// robot xyz can access all syncable resources matching *xyz.
	if req.RobotName == robotName || strings.HasSuffix(req.ResourceName, robotName) {
		return nil
	}
	slog.Error("robot impersonation rejected",
		slog.String("SourceName", robotName),
		slog.String("TargetName", req.RobotName+req.ResourceName),
		slog.String("Kind", req.GroupKind),
		slog.String("URL", rawURL),
	)
	return errors.New("credentials rejected")
}
// auth is a webhook to inspect incoming requests from the cr-syncer, check if
// they are allowed, and if so, provide an Authorization header so the K8s
// apiserver will serve them. This lets nginx handle the request & response
// bodies itself.
func (h *handlers) auth(w http.ResponseWriter, r *http.Request) {
	encodedJWT := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
	if err := h.verifyJWT(encodedJWT); err != nil {
		if *acceptLegacyCredentials {
			// The request already has the necessary credentials, so preserve these.
			w.Header().Add("Authorization", r.Header.Get("Authorization"))
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Error(w, "No valid credentials provided", http.StatusUnauthorized)
		return
	}
	// verifyJWT() has already checked the signature so we don't need to.
	claims, err := jws.Decode(encodedJWT)
	if err != nil {
		slog.Error("Failed to parse JWT despite previous verification")
		http.Error(w, "Credentials could not be parsed", http.StatusInternalServerError)
		return
	}
	slog.Debug("JWT parsed", slog.String("ID", claims.Sub))
	// The JWT subject names the robot; reject attempts to touch resources
	// belonging to other robots.
	if err := h.validateRequest(r, claims.Sub); err != nil {
		http.Error(w, "Request not allowed", http.StatusForbidden)
		return
	}
	// Provide a k8s token to nginx so that GKE accepts the request. Policy for
	// the cr-syncer-auth-webhook ServiceAccount is defined in
	// cr-syncer-policy.yaml.
	k8sToken, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		slog.Error("failed to read /var/run/secrets/kubernetes.io/serviceaccount/token", ilog.Err(err))
		http.Error(w, "Internal error", http.StatusInternalServerError)
		// Bug fix: this return was missing, so the handler fell through and
		// appended a 200 OK with an empty bearer token after the 500 error.
		return
	}
	w.Header().Add("Authorization", "Bearer "+string(k8sToken))
	w.WriteHeader(http.StatusOK)
}
// main starts the auth webhook's HTTP server and shuts it down gracefully on
// SIGINT or SIGTERM.
func main() {
	flag.Parse()
	slog.SetDefault(slog.New(ilog.NewLogHandler(slog.Level(*logLevel), os.Stderr)))

	h := newHandlers()
	http.HandleFunc("/healthz", h.health)
	http.HandleFunc("/auth", h.auth)

	srv := &http.Server{Addr: fmt.Sprintf(":%d", *port)}
	go func() {
		slog.Info("Serving requests...")
		err := srv.ListenAndServe()
		if !errors.Is(err, http.ErrServerClosed) {
			slog.Error("server.ListenAndServe() failed unexpectedly", ilog.Err(err))
			os.Exit(1)
		}
		slog.Info("Stopped serving new connections.")
	}()

	// Block until a termination signal arrives, then allow in-flight requests
	// up to 10 seconds to complete.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		slog.Error("server.Shutdown() failed unexpectedly", ilog.Err(err))
		os.Exit(1)
	}
	slog.Info("Server shutdown complete.")
}
================================================
FILE: src/go/cmd/cr-syncer-auth-webhook/request.go
================================================
// request.go contains methods for understanding and validating the incoming
// request.
package main
import (
"fmt"
"net/url"
"regexp"
"slices"
"strings"
"github.com/pkg/errors"
)
// Regex for RFC 1123 subdomain format
// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
// https://github.com/kubernetes/kubernetes/blob/976a940f4a4e84fe814583848f97b9aafcdb083f/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L209
var isValidRobotName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`).MatchString

// the prefix of the label selector query param used by the cr-syncer
const robotNameSelectorPrefix = "cloudrobotics.com/robot-name="

// incomingRequest contains the authz-relevant properties of the resource
type incomingRequest struct {
	// GroupKind, eg "registry.cloudrobotics.com/robots"
	GroupKind string
	// RobotName, or empty if no label selector is used (eg for a Get or Update)
	RobotName string
	// ResourceName, or empty if no resource is specified (eg for a List or Watch of a filtered resource)
	ResourceName string
}

// parseURL parses the URL that the cr-syncer is hitting to find the
// authz-relevant properties. It returns an error for URLs that do not look
// like CR accesses under the /apis/core.kubernetes proxy prefix.
func parseURL(urlString string) (*incomingRequest, error) {
	result := incomingRequest{}
	u, err := url.Parse(urlString)
	if err != nil {
		return nil, err
	}
	// Path should be one of:
	//   /apis/core.kubernetes/apis/<group>/<version>/<kind>
	//   /apis/core.kubernetes/apis/<group>/<version>/namespaces/<ns>/<kind>
	//   /apis/core.kubernetes/apis/<group>/<version>/namespaces/<ns>/<kind>/<name>
	//   /apis/core.kubernetes/apis/<group>/<version>/namespaces/<ns>/<kind>/<name>/status
	//                              parts[0] parts[1]  parts[2]   parts[3] parts[4] parts[5]
	rest, found := strings.CutPrefix(u.Path, "/apis/core.kubernetes/apis/")
	if !found {
		// Bug fix: TrimPrefix silently passed non-matching paths through and
		// they were parsed as if relative to the prefix; reject them instead.
		return nil, errors.New("unexpected URL prefix")
	}
	parts := strings.Split(rest, "/")
	if len(parts) < 3 || len(parts) > 7 {
		return nil, errors.New("unexpected URL length")
	}
	if parts[2] != "namespaces" {
		// Add in "/namespaces/default" so remaining code can use fixed indices.
		// I also considered a regexp but it's not pretty:
		//   "/apis/core.kubernetes/apis/([^/]*)/([^/]*)(/namespaces/[^/]*)?/([^/]*)/?([^/]*)(/status)?"
		parts = slices.Insert(parts, 2, "namespaces", "default")
	}
	if len(parts) < 5 {
		// Bug fix: a namespaced path without a kind (eg ".../v1/namespaces" or
		// ".../v1/namespaces/ns") previously caused an index-out-of-range
		// panic on the parts[4] access below.
		return nil, errors.New("unexpected URL length")
	}
	result.GroupKind = fmt.Sprintf("%s/%s", parts[0], parts[4])
	if len(parts) > 5 {
		// if a resourceName is in the URL, we don't need to look at the query parameters
		result.ResourceName = parts[5]
		return &result, nil
	}
	// If we have no resourceName, this is a list/watch request, so check for a
	// labelSelector.
	labelSelectors := u.Query()["labelSelector"]
	if len(labelSelectors) == 0 {
		// This is an unfiltered List or Watch request (eg for robottypes).
		return &result, nil
	}
	if len(labelSelectors) > 1 || !strings.HasPrefix(labelSelectors[0], robotNameSelectorPrefix) {
		return nil, errors.New("invalid label selector")
	}
	result.RobotName = strings.TrimPrefix(labelSelectors[0], robotNameSelectorPrefix)
	if !isValidRobotName(result.RobotName) {
		return nil, errors.New("invalid robot name")
	}
	return &result, nil
}
================================================
FILE: src/go/cmd/cr-syncer-auth-webhook/request_test.go
================================================
package main
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestParseURL checks that parseURL extracts the group/kind, robot name, and
// resource name from representative cr-syncer request URLs.
func TestParseURL(t *testing.T) {
	tests := []struct {
		desc string
		url  string
		want incomingRequest
	}{
		{
			desc: "watch request, filtered",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/chartassignments?labelSelector=cloudrobotics.com%2Frobot-name%3Dmy-robot",
			want: incomingRequest{
				GroupKind: "apps.cloudrobotics.com/chartassignments",
				RobotName: "my-robot",
			},
		},
		{
			desc: "watch request, unfiltered",
			url:  "http://host/apis/core.kubernetes/apis/registry.cloudrobotics.com/v1alpha1/robottypes",
			want: incomingRequest{
				GroupKind: "registry.cloudrobotics.com/robottypes",
			},
		},
		{
			desc: "watch request, with namespace",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/namespaces/default/chartassignments?labelSelector=cloudrobotics.com%2Frobot-name%3Dmy-robot",
			want: incomingRequest{
				GroupKind: "apps.cloudrobotics.com/chartassignments",
				RobotName: "my-robot",
			},
		},
		{
			desc: "get request",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/namespaces/default/chartassignments/resource-for-my-robot",
			want: incomingRequest{
				GroupKind:    "apps.cloudrobotics.com/chartassignments",
				ResourceName: "resource-for-my-robot",
			},
		},
		{
			desc: "status post request, with namespace",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/namespaces/default/chartassignments/resource-for-my-robot/status",
			want: incomingRequest{
				GroupKind:    "apps.cloudrobotics.com/chartassignments",
				ResourceName: "resource-for-my-robot",
			},
		},
		{
			desc: "status post request, without namespace",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/chartassignments/resource-for-my-robot/status?timeout=5m5s",
			want: incomingRequest{
				GroupKind:    "apps.cloudrobotics.com/chartassignments",
				ResourceName: "resource-for-my-robot",
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			got, err := parseURL(tc.url)
			if err != nil {
				t.Fatalf("parseURL(%q) returned error: %v", tc.url, err)
			}
			if diff := cmp.Diff(tc.want, *got); diff != "" {
				t.Errorf("parseURL(%q) returned diff (-want +got):\n%s", tc.url, diff)
			}
		})
	}
}
// TestParseURLErrors checks that parseURL rejects URLs that are malformed or
// out of scope for the cr-syncer.
func TestParseURLErrors(t *testing.T) {
	tests := []struct {
		desc string
		url  string
	}{
		{
			desc: "empty robot name",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/chartassignments?labelSelector=cloudrobotics.com%2Frobot-name%3D",
		},
		{
			desc: "over-broad label selector: robot-name!=my-robot",
			url:  "http://host/apis/core.kubernetes/apis/apps.cloudrobotics.com/v1alpha1/chartassignments?labelSelector=cloudrobotics.com%2Frobot-name%21%3Dmy-robot",
		},
		{
			desc: "core API (not a CR)",
			url:  "http://host/apis/core.kubernetes/api/v1/namespaces/default/pods/cr-syncer-6676b4958d-p9hqw",
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			if _, err := parseURL(tc.url); err == nil {
				t.Fatalf("parseURL(%q) succeeded unexpected", tc.url)
			}
		})
	}
}
================================================
FILE: src/go/cmd/gcr-credential-refresher/BUILD.bazel
================================================
# Build rules for the gcr-credential-refresher: a Go binary packaged into a
# distroless container image.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])

# Library with the refresh loop that keeps GCR pull credentials up to date.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/gcr-credential-refresher",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/gcr:go_default_library",
        "//src/go/pkg/robotauth:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
    ],
)

# The refresher binary.
go_binary(
    name = "gcr-credential-refresher-app",
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)

# Tarball layer containing the binary, consumed by the image rule below.
pkg_tar(
    name = "gcr-credential-refresher-image-layer",
    srcs = [":gcr-credential-refresher-app"],
    extension = "tar.gz",
)

# Distroless container image that runs the refresher.
oci_image(
    name = "gcr-credential-refresher-image",
    base = "@distroless_base",
    entrypoint = ["/gcr-credential-refresher-app"],
    tars = [":gcr-credential-refresher-image-layer"],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/gcr-credential-refresher/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
	"context"
	"flag"
	"fmt"
	"log"
	"time"

	"github.com/googlecloudrobotics/core/src/go/pkg/gcr"
	"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)
// Command-line flags identifying the robot and the service account to act as.
var (
	robotIdFile = flag.String("robot_id_file", "", "robot-id.json file")
	robotSAName = flag.String("service_account", "robot-service", "Robot default service account name, default: robot-service")
)

// updateInterval is how often the GCR pull credentials are refreshed.
const updateInterval = 10 * time.Minute
// updateCredentials exchanges the robot's credentials for fresh GCR pull
// credentials via the TokenVendor in the cloud cluster, and stores them in
// the surrounding Kubernetes cluster.
//
// All failures are returned as errors so the caller decides how to react;
// the previous version called log.Fatal from inside this helper, exiting the
// process and bypassing the error return value entirely.
func updateCredentials(ctx context.Context) error {
	// Connect to the surrounding k8s cluster.
	localConfig, err := rest.InClusterConfig()
	if err != nil {
		return fmt.Errorf("failed to load in-cluster config: %w", err)
	}
	localClient, err := kubernetes.NewForConfig(localConfig)
	if err != nil {
		return fmt.Errorf("failed to create Kubernetes client: %w", err)
	}
	robotAuth, err := robotauth.LoadFromFile(*robotIdFile)
	if err != nil {
		return fmt.Errorf("failed to read robot id file %s: %w", *robotIdFile, err)
	}
	effectiveSA, err := robotAuth.ServiceAccountEmail(*robotSAName)
	if err != nil {
		return fmt.Errorf("failed to construct service account from '%s': %w", *robotSAName, err)
	}
	// Perform a token exchange with the TokenVendor in the cloud cluster and update the
	// credentials used to pull images from GCR.
	return gcr.UpdateGcrCredentials(ctx, localClient, robotAuth, effectiveSA)
}
// Updates the token used to pull images from GCR in the surrounding cluster. The update runs
// on startup, and then every 10 minutes.
func main() {
	flag.Parse()
	ctx := context.Background()
	// Loop forever; any refresh error is fatal, relying on the surrounding
	// orchestration to restart the process.
	for {
		if err := updateCredentials(ctx); err != nil {
			log.Fatal(err)
		}
		log.Printf("Updated GCR credentials in local cluster")
		time.Sleep(updateInterval)
	}
}
================================================
FILE: src/go/cmd/http-relay-client/BUILD.bazel
================================================
# Build rules for the http-relay-client: a Go binary packaged into a
# distroless container image.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])

# Thin main package; the relay logic lives in the client subpackage.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/http-relay-client",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/cmd/http-relay-client/client:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@io_opencensus_go_contrib_exporter_stackdriver//:go_default_library",
    ],
)

# The relay client binary.
go_binary(
    name = "http-relay-client-app",
    embed = [":go_default_library"],
)

# Tarball layer containing the binary, consumed by the image rule below.
pkg_tar(
    name = "http-relay-client-image-layer",
    srcs = [":http-relay-client-app"],
    extension = "tar.gz",
)

# Distroless container image that runs the relay client.
oci_image(
    name = "http-relay-client-image",
    base = "@distroless_base",
    entrypoint = ["/http-relay-client-app"],
    tars = [":http-relay-client-image-layer"],
)
================================================
FILE: src/go/cmd/http-relay-client/client/BUILD.bazel
================================================
# Build rules for the relay client library and its unit tests.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

# Library implementing the relay client (pulls requests from the relay
# server, forwards them to the local backend, posts responses back).
go_library(
    name = "go_default_library",
    srcs = ["client.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/http-relay-client/client",
    deps = [
        "//src/proto/http-relay:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_opencensus_go//plugin/ochttp:go_default_library",
        "@io_opencensus_go//plugin/ochttp/propagation/tracecontext:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_net//http2:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
        "@org_golang_x_oauth2//google:go_default_library",
    ],
)

# Unit tests for the relay client (uses gock for HTTP mocking).
go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["client_test.go"],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
    deps = [
        "//src/proto/http-relay:go_default_library",
        "@com_github_onsi_gomega//:go_default_library",
        "@in_gopkg_h2non_gock_v1//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
================================================
FILE: src/go/cmd/http-relay-client/client/client.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main runs a local HTTP relay client.
//
// See the documentation of ../http-relay-server/main.go for details about
// the system architecture. In a nutshell, this program pulls serialized HTTP
// requests from a remote relay server, redirects them to a local backend, and
// posts the serialized response to the relay server.
package client
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"log/slog"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"sync"
"syscall"
"time"
pb "github.com/googlecloudrobotics/core/src/proto/http-relay"
"github.com/googlecloudrobotics/ilog"
"github.com/cenkalti/backoff"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/trace"
"golang.org/x/net/http2"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/protobuf/proto"
)
var (
	// Sentinel errors whose text mirrors the corresponding HTTP status codes
	// (408 Request Timeout and 403 Forbidden).
	ErrTimeout   = errors.New(http.StatusText(http.StatusRequestTimeout))
	ErrForbidden = errors.New(http.StatusText(http.StatusForbidden))
	// debugLogs gates verbose logging; off by default.
	debugLogs bool = false
)

// This is a package internal variable which we define to be able to overwrite
// the measured time during unit tests. This is a light weight alternative
// to mocking the entire time interface and passing it along all call paths.
var timeSince = time.Since
// ClientConfig bundles the tunables of the relay client. DefaultClientConfig
// returns the default values.
type ClientConfig struct {
	// RemoteRequestTimeout is the overall timeout for HTTP requests to the
	// relay server.
	RemoteRequestTimeout time.Duration
	// BackendResponseTimeout is presumably how long to wait for backend
	// response data before flushing — used outside this view; TODO confirm.
	BackendResponseTimeout time.Duration
	// IdleConnTimeout closes idle relay-server connections, limiting how long
	// HTTP/2 PINGs run on long-idle connections.
	IdleConnTimeout time.Duration
	// ReadIdleTimeout enables HTTP/2 PING on the relay connection so the
	// client recovers faster after the node IP changes (golang/go#59690).
	ReadIdleTimeout time.Duration
	// DisableAuthForRemote skips setting up Google application-default
	// credentials for requests to the relay server.
	DisableAuthForRemote bool
	// RootCAFile, if set, is a PEM file with root CAs to trust for backend
	// TLS connections (also honors SSLKEYLOGFILE for debugging).
	RootCAFile string
	// AuthenticationTokenFile is used outside this view; presumably a token
	// file for backend authentication — confirm in Start().
	AuthenticationTokenFile string
	// BackendScheme is "http" or "https". "http" together with ForceHttp2
	// enables HTTP/2 cleartext (H2C), e.g. for gRPC backends.
	BackendScheme string
	// BackendAddress is the host:port of the local backend.
	BackendAddress string
	// BackendPath is presumably a path prefix for backend requests — used
	// outside this view; TODO confirm.
	BackendPath string
	// PreserveHost is presumably whether to keep the original Host header on
	// backend requests — used outside this view; TODO confirm.
	PreserveHost bool
	// RelayScheme and RelayAddress locate the relay server; RelayPrefix is
	// presumably a path prefix on its endpoints — used outside this view.
	RelayScheme string
	RelayAddress string
	RelayPrefix string
	// ServerName is presumably the name under which this client registers
	// with the relay server — used outside this view; TODO confirm.
	ServerName string
	// NumPendingRequests is presumably the number of concurrent request
	// pulls from the relay server — used outside this view; TODO confirm.
	NumPendingRequests int
	// MaxIdleConnsPerHost caps idle connections kept per host (applied to
	// both MaxIdleConns and MaxIdleConnsPerHost of the transports).
	MaxIdleConnsPerHost int
	// MaxChunkSize and BlockSize size the data chunks handled by the relay —
	// used outside this view; TODO confirm exact semantics.
	MaxChunkSize int
	BlockSize int
	// DisableHttp2 disables HTTP/2 on the backend transport; ForceHttp2
	// forces an HTTP/2 (or H2C) backend transport. Setting both is a fatal
	// configuration error.
	DisableHttp2 bool
	ForceHttp2 bool
}
// RelayServerError represents an error reported by the relay server itself
// (as opposed to one produced by the local backend).
type RelayServerError struct {
	msg string
}

// NewRelayServerError wraps the given message in a RelayServerError.
func NewRelayServerError(msg string) error {
	return &RelayServerError{msg: msg}
}

// Error implements the error interface.
func (e *RelayServerError) Error() string {
	return e.msg
}
// DefaultClientConfig returns the client configuration populated with default
// values, intended as the starting point before applying overrides.
func DefaultClientConfig() ClientConfig {
	return ClientConfig{
		RemoteRequestTimeout:   60 * time.Second,
		BackendResponseTimeout: 100 * time.Millisecond,
		// ReadIdleTimeout works around an upstream issue by enabling
		// HTTP/2 PING, so we recover faster after the node IP changes.
		// IdleConnTimeout is here because I was worried this would
		// create unnecessary load with PINGs on long-idle connections.
		// https://github.com/golang/go/issues/59690
		ReadIdleTimeout:         30 * time.Second,
		IdleConnTimeout:         120 * time.Second,
		DisableAuthForRemote:    false,
		RootCAFile:              "",
		AuthenticationTokenFile: "",
		BackendScheme:           "https",
		BackendAddress:          "localhost:8080",
		BackendPath:             "",
		PreserveHost:            true,
		RelayScheme:             "https",
		RelayAddress:            "localhost:8081",
		RelayPrefix:             "",
		ServerName:              "server_name",
		NumPendingRequests:      1,
		MaxIdleConnsPerHost:     100,
		MaxChunkSize:            50 * 1024,
		BlockSize:               10 * 1024,
		DisableHttp2:            false,
		ForceHttp2:              false,
	}
}
// Client is a relay client instance parameterized by a ClientConfig.
type Client struct {
	config ClientConfig
}

// NewClient creates a relay client with the given configuration.
func NewClient(config ClientConfig) *Client {
	return &Client{config: config}
}
// Start builds the HTTP clients for both sides of the relay — the remote
// relay server and the local backend — and then runs NumPendingRequests
// polling workers. It never returns; fatal setup errors exit the process.
func (c *Client) Start() {
	var err error
	// Transport for talking to the relay server.
	remoteTransport := http.DefaultTransport.(*http.Transport).Clone()
	remoteTransport.MaxIdleConns = c.config.MaxIdleConnsPerHost
	remoteTransport.MaxIdleConnsPerHost = c.config.MaxIdleConnsPerHost
	remoteTransport.IdleConnTimeout = c.config.IdleConnTimeout
	// Enable HTTP/2 PING health checks so dead connections are detected
	// quickly (see the ReadIdleTimeout comment in DefaultClientConfig).
	http2Trans, err := http2.ConfigureTransports(remoteTransport)
	if err == nil {
		http2Trans.ReadIdleTimeout = c.config.ReadIdleTimeout
	}
	remote := &http.Client{Transport: remoteTransport}
	if !c.config.DisableAuthForRemote {
		// Wrap the transport with application default credentials; the
		// oauth2 client reuses `remote` as its underlying HTTP client.
		ctx := context.WithValue(context.Background(), oauth2.HTTPClient, remote)
		scope := "https://www.googleapis.com/auth/cloud-platform.read-only"
		if remote, err = google.DefaultClient(ctx, scope); err != nil {
			slog.Error("unable to set up credentials for relay-server authentication", ilog.Err(err))
			os.Exit(1)
		}
	}
	remote.Timeout = c.config.RemoteRequestTimeout
	// Optional TLS configuration for the backend connection.
	var tlsConfig *tls.Config
	if c.config.RootCAFile != "" {
		rootCAs := x509.NewCertPool()
		certs, err := os.ReadFile(c.config.RootCAFile)
		if err != nil {
			slog.Error("Failed to read CA file", slog.String("File", c.config.RootCAFile), ilog.Err(err))
			os.Exit(1)
		}
		if ok := rootCAs.AppendCertsFromPEM(certs); !ok {
			slog.Error("No certs found", slog.String("File", c.config.RootCAFile))
			os.Exit(1)
		}
		tlsConfig = &tls.Config{RootCAs: rootCAs}
		// Optionally dump TLS session keys for debugging (e.g. Wireshark).
		if keyLogFile := os.Getenv("SSLKEYLOGFILE"); keyLogFile != "" {
			keyLog, err := os.OpenFile(keyLogFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
			if err != nil {
				slog.Warn("Cannot open keylog file (check SSLKEYLOGFILE env var)", slog.String("File", keyLogFile), ilog.Err(err))
			} else {
				tlsConfig.KeyLogWriter = keyLog
			}
		}
	}
	// Transport for talking to the local backend: forced HTTP/2 (for gRPC),
	// or a cloned default transport with HTTP/2 optionally disabled.
	var transport http.RoundTripper
	if c.config.ForceHttp2 {
		h2transport := &http2.Transport{}
		h2transport.TLSClientConfig = tlsConfig
		if c.config.DisableHttp2 {
			slog.Error("Cannot use --force_http2 together with --disable_http2")
			os.Exit(1)
		}
		if c.config.BackendScheme == "http" {
			// Enable HTTP/2 Cleartext (H2C) for gRPC backends.
			h2transport.AllowHTTP = true
			h2transport.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) {
				// Pretend we are dialing a TLS endpoint.
				// Note, we ignore the passed tls.Config
				return net.Dial(network, addr)
			}
		}
		transport = h2transport
	} else {
		h1transport := http.DefaultTransport.(*http.Transport).Clone()
		h1transport.MaxIdleConns = c.config.MaxIdleConnsPerHost
		h1transport.MaxIdleConnsPerHost = c.config.MaxIdleConnsPerHost
		h1transport.TLSClientConfig = tlsConfig
		if c.config.DisableHttp2 {
			// Fix for: http2: invalid Upgrade request header: ["SPDY/3.1"]
			// according to the docs:
			// Programs that must disable HTTP/2 can do so by setting Transport.TLSNextProto (for clients) or
			// Server.TLSNextProto (for servers) to a non-nil, empty map.
			h1transport.TLSNextProto = map[string]func(authority string, c *tls.Conn) http.RoundTripper{}
		}
		transport = h1transport
	}
	// TODO(https://github.com/golang/go/issues/31391): reimplement timeouts if possible
	// (see also https://github.com/golang/go/issues/30876)
	local := &http.Client{
		CheckRedirect: func(*http.Request, []*http.Request) error {
			// Don't follow redirects: instead, pass them through the relay untouched.
			return http.ErrUseLastResponse
		},
		// Wrap with OpenCensus so backend requests are traced.
		Transport: &ochttp.Transport{Base: transport},
	}
	wg := new(sync.WaitGroup)
	wg.Add(c.config.NumPendingRequests)
	for i := 0; i < c.config.NumPendingRequests; i++ {
		go c.localProxyWorker(remote, local)
	}
	// Waiting for all goroutines to finish (they never do)
	// NOTE(review): the workers never call wg.Done, so this blocks forever by design.
	wg.Wait()
}
// addServiceName tags a span with this binary's service name so traces can be
// filtered by component.
func addServiceName(span *trace.Span) {
	span.AddAttributes(trace.StringAttribute("service.name", "http-relay-client"))
}
// getRequest long-polls the relay server for the next pending request
// addressed to this server name and unmarshals it from the response body.
// It maps 408 to ErrTimeout and 403 to ErrForbidden for the caller.
func (c *Client) getRequest(remote *http.Client, relayURL string) (*pb.HttpRequest, error) {
	if debugLogs {
		slog.Info("Connecting to relay server to get next request", slog.String("ServerName", c.config.ServerName))
	}
	resp, err := remote.Get(relayURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Read the body unconditionally so error bodies can be reported below.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	switch resp.StatusCode {
	case http.StatusRequestTimeout:
		return nil, ErrTimeout
	case http.StatusForbidden:
		return nil, ErrForbidden
	case http.StatusOK:
		// Fall through to unmarshaling.
	default:
		return nil, fmt.Errorf("server status %s: %s", http.StatusText(resp.StatusCode), string(body))
	}
	req := &pb.HttpRequest{}
	if err := proto.Unmarshal(body, req); err != nil {
		return nil, fmt.Errorf("failed to unmarshal request: %v. request was: %q", err, string(body))
	}
	return req, nil
}
// marshalHeader flattens an http.Header (name -> multiple values) into a
// slice of name/value proto pairs, one entry per value.
func marshalHeader(h *http.Header) []*pb.HttpHeader {
	result := make([]*pb.HttpHeader, 0)
	for name, values := range *h {
		for _, value := range values {
			result = append(result, &pb.HttpHeader{
				Name:  proto.String(name),
				Value: proto.String(value),
			})
		}
	}
	return result
}
// extractRequestHeader copies every name/value pair from the relayed request
// proto into the given http.Header, preserving repeated values via Add.
func extractRequestHeader(breq *pb.HttpRequest, header *http.Header) {
	for _, entry := range breq.Header {
		header.Add(*entry.Name, *entry.Value)
	}
}
// createBackendRequest translates a relayed request proto into an
// *http.Request aimed at the configured local backend: the placeholder
// scheme/host in the relayed URL are replaced and the configured backend
// path prefix is prepended. It returns an error if the URL is unparsable,
// the request cannot be built, or the token file cannot be read.
func (c *Client) createBackendRequest(breq *pb.HttpRequest) (*http.Request, error) {
	id := *breq.Id
	targetUrl, err := url.Parse(*breq.Url)
	if err != nil {
		return nil, err
	}
	targetUrl.Scheme = c.config.BackendScheme
	targetUrl.Host = c.config.BackendAddress
	targetUrl.Path = c.config.BackendPath + targetUrl.Path
	slog.Debug("Sending request to backend",
		slog.String("ID", id),
		slog.String("Method", *breq.Method),
		slog.Any("TargetURL", *targetUrl))
	req, err := http.NewRequest(*breq.Method, targetUrl.String(), bytes.NewReader(breq.Body))
	if err != nil {
		return nil, err
	}
	if c.config.PreserveHost && breq.Host != nil {
		// Keep the original Host header for backends that do virtual-host or
		// cross-origin checks.
		req.Host = *breq.Host
	}
	extractRequestHeader(breq, &req.Header)
	if c.config.AuthenticationTokenFile != "" {
		token, err := os.ReadFile(c.config.AuthenticationTokenFile)
		if err != nil {
			return nil, fmt.Errorf("Failed to read authentication token from %s: %v", c.config.AuthenticationTokenFile, err)
		}
		// Token files commonly end with a trailing newline; a newline inside
		// a header value makes the request invalid, so trim surrounding
		// whitespace before building the Authorization header.
		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", bytes.TrimSpace(token)))
	}
	if debugLogs {
		dump, _ := httputil.DumpRequest(req, false)
		slog.Info("DumpRequest", slog.String("Request", string(dump)))
	}
	return req, nil
}
// This function builds and executes a http.Request from the proto request we
// received from the user-client. This user-client (e.g. Chrome) request is
// executed in the network in which the relay-client is running. In case of
// our on-prem cluster, these requests are processed by Istio and sent to the
// relevant in-cluster service.
// It returns both a new pb.HttpResponse as well as the related http.Response so
// that the caller can access e.g. http trailers once the response body has
// been read. Note the returned pb.HttpResponse carries only status, headers
// and trailers; the body is streamed separately by the caller.
func makeBackendRequest(ctx context.Context, local *http.Client, req *http.Request, id string) (*pb.HttpResponse, *http.Response, error) {
	// Span covering the round trip to the backend; the span context is
	// injected into the outgoing request headers for distributed tracing.
	_, backendSpan := trace.StartSpan(ctx, "Sent."+req.URL.Path)
	addServiceName(backendSpan)
	f := &tracecontext.HTTPFormat{}
	f.SpanContextToRequest(backendSpan.SpanContext(), req)
	resp, err := local.Do(req)
	if err != nil {
		backendSpan.End()
		return nil, nil, err
	}
	backendSpan.End()
	// Separate span for converting the response metadata into a proto.
	_, backendResp := trace.StartSpan(ctx, "Creating response (proto marshaling)")
	addServiceName(backendResp)
	defer backendResp.End()
	if debugLogs {
		slog.Info("Backend responded", slog.String("ID", id), slog.Int("Status", resp.StatusCode))
		dump, _ := httputil.DumpResponse(resp, false)
		slog.Info("DumpResponse", slog.String("Response", string(dump)))
		// We get 'Grpc-Status' and 'Grpc-Message' headers that we need to persist.
		// Why is it not part of Trailers?
		slog.Info("Headers",
			slog.String("ID", id),
			slog.String("Header", fmt.Sprintf("%+v", resp.Header)))
		// Initially only keys, values are set after body has be read (EOF)
		slog.Info("Trailers",
			slog.String("ID", id),
			slog.String("Trailer", fmt.Sprintf("%+v", resp.Trailer)))
	}
	return &pb.HttpResponse{
		Id:         proto.String(id),
		StatusCode: proto.Int32(int32(resp.StatusCode)),
		Header:     marshalHeader(&resp.Header),
		Trailer:    marshalHeader(&resp.Trailer),
	}, resp, nil
}
// postResponse marshals one response chunk and POSTs it to the relay server's
// /server/response endpoint. A 400 reply is reported as a permanent backoff
// error since the relay likely restarted or the client cancelled the request.
func (c *Client) postResponse(remote *http.Client, br *pb.HttpResponse) error {
	payload, err := proto.Marshal(br)
	if err != nil {
		return err
	}
	endpoint := url.URL{
		Scheme: c.config.RelayScheme,
		Host:   c.config.RelayAddress,
		Path:   c.config.RelayPrefix + "/server/response",
	}
	resp, err := remote.Post(endpoint.String(), "application/vnd.google.protobuf;proto=cloudrobotics.http_relay.v1alpha1.HttpResponse", bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("couldn't post response to relay server: %v", err)
	}
	defer resp.Body.Close()
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("couldn't read relay server's response body: %v", err)
	}
	if resp.StatusCode == http.StatusOK {
		// On success the body is just the 2-byte acknowledgement "ok".
		return nil
	}
	serverErr := NewRelayServerError(fmt.Sprintf("relay server responded %s: %s", http.StatusText(resp.StatusCode), respBody))
	if resp.StatusCode == http.StatusBadRequest {
		// http-relay-server may have restarted or the client cancelled the request.
		return backoff.Permanent(serverErr)
	}
	return serverErr
}
// streamBytes converts an io.Reader into a channel to enable select{}-style
// timeouts. It reads BlockSize-sized chunks from `in` and sends them on
// `out`, closing the channel once the reader reports EOF or any error.
func (c *Client) streamBytes(id string, in io.ReadCloser, out chan<- []byte) {
	for {
		// A fresh buffer is required per read: the consumer of the channel
		// may still be holding the previously sent slice.
		chunk := make([]byte, c.config.BlockSize)
		if debugLogs {
			slog.Info("Reading from backend", slog.String("ID", id))
		}
		n, err := in.Read(chunk)
		if err != nil && err != io.EOF {
			slog.Error("Failed to read from backend", slog.String("ID", id), ilog.Err(err))
		}
		if n > 0 {
			if debugLogs {
				slog.Info("Forward from backend", slog.String("ID", id), slog.Int("ByteCount", n))
			}
			out <- chunk[:n]
		}
		if err != nil {
			break
		}
	}
	if debugLogs {
		slog.Info("Got EOF reading from backend", slog.String("ID", id))
	}
	close(out)
}
// buildResponses collates the bytes from the in stream into HttpResponse objects.
// This function needs to consider three cases:
//   - Data is coming fast. We chunk the data into 'maxChunkSize' blocks and keep sending it.
//   - Data is trickling slow. We accumulate data for the timeout duration and then send it.
//     Timeout is determined by the maximum latency the user should see.
//   - No data needs to be transferred. We keep sending empty responses every few seconds
//     to show the relay server that we're still alive.
func (c *Client) buildResponses(in <-chan []byte, resp *pb.HttpResponse, out chan<- *pb.HttpResponse) {
	defer close(out)
	// The timer drives both the latency bound for trickling data and the
	// keep-alive cadence for idle streams.
	timer := time.NewTimer(c.config.BackendResponseTimeout)
	timeouts := 0
	// TODO(haukeheibel): Why are we not simply reading the entire body? Why the chunking?
	for {
		select {
		case b, more := <-in:
			resp.Body = append(resp.Body, b...)
			if !more {
				// Upstream closed the channel: mark EOF and flush the final chunk.
				if debugLogs {
					slog.Info("Posting final response to relay",
						slog.String("ID", *resp.Id), slog.Int("ByteCount", len(resp.Body)))
				}
				resp.Eof = proto.Bool(true)
				out <- resp
				return
			} else if len(resp.Body) > c.config.MaxChunkSize {
				// Fast path: flush once the accumulated body exceeds the chunk limit.
				if debugLogs {
					slog.Info("Posting intermediate response to relay",
						slog.String("ID", *resp.Id), slog.Int("ByteCount", len(resp.Body)))
				}
				out <- resp
				// Follow-up chunks carry only the request id; status and
				// headers were already delivered with the first chunk.
				resp = &pb.HttpResponse{Id: resp.Id}
				timeouts = 0
			}
		case <-timer.C:
			timer.Reset(c.config.BackendResponseTimeout)
			timeouts += 1
			// We send an (empty) response after 30 timeouts as a keep-alive packet.
			if len(resp.Body) > 0 || resp.StatusCode != nil || timeouts > 30 {
				if debugLogs {
					slog.Info("Posting partial response to relay",
						slog.String("ID", *resp.Id), slog.Int("ByteCount", len(resp.Body)))
				}
				out <- resp
				resp = &pb.HttpResponse{Id: resp.Id}
				timeouts = 0
			}
		}
	}
}
// postErrorResponse resolves the client's request in case of an internal error.
// This is not strictly necessary, but avoids kubectl hanging in such cases. As
// this is best-effort, errors posting the response are logged and ignored.
func (c *Client) postErrorResponse(remote *http.Client, id string, message string) {
	errResp := &pb.HttpResponse{
		Id:         proto.String(id),
		StatusCode: proto.Int32(http.StatusInternalServerError),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("Content-Type"),
			Value: proto.String("text/plain"),
		}},
		Body: []byte(message),
		Eof:  proto.Bool(true),
	}
	if err := c.postResponse(remote, errResp); err != nil {
		slog.Error("Failed to post error response to relay",
			slog.String("ID", id), ilog.Err(err))
	}
}
// streamToBackend streams data from the client (eg kubectl) to the
// backend. For example, when using `kubectl exec` this handles stdin.
// It fails permanently and closes the backend connection on any failure, as
// the relay-server doesn't have sufficiently advanced flow control to recover
// from dropped/duplicate "packets".
func (c *Client) streamToBackend(remote *http.Client, id string, backendWriter io.WriteCloser) {
	// Close the backend connection on stream failure. This should cause the
	// response stream to end and prevent the client from hanging in the case
	// of an error in the request stream.
	defer backendWriter.Close()
	streamURL := (&url.URL{
		Scheme:   c.config.RelayScheme,
		Host:     c.config.RelayAddress,
		Path:     c.config.RelayPrefix + "/server/requeststream",
		RawQuery: "id=" + id,
	}).String()
	for {
		// Get data from the "request stream", then copy it to the backend.
		// We use a Post with empty body to avoid caching.
		resp, err := remote.Post(streamURL, "text/plain", http.NoBody)
		if err != nil {
			// TODO(rodrigoq): detect transient failure and retry w/ backoff?
			// e.g. "server status Request Timeout: No request received within timeout"
			slog.Error("Failed to get request stream",
				slog.String("ID", id), ilog.Err(err))
			return
		}
		// Close the body explicitly on every path rather than with defer:
		// deferred calls would pile up until the function returns, leaking
		// open response bodies on long-lived streams.
		if resp.StatusCode == http.StatusGone {
			resp.Body.Close()
			if debugLogs {
				slog.Info("End of request stream", slog.String("ID", id))
			}
			return
		} else if resp.StatusCode != http.StatusOK {
			msg, err := io.ReadAll(resp.Body)
			resp.Body.Close()
			if err != nil {
				// Still log the status below, but note why the body is missing.
				msg = []byte(fmt.Sprintf("<failed to read response body: %v>", err))
			}
			if debugLogs {
				slog.Info("Relay server request stream responded",
					slog.String("ID", id),
					slog.String("Status", http.StatusText(resp.StatusCode)),
					slog.String("Message", string(msg)))
			}
			return
		}
		n, err := io.Copy(backendWriter, resp.Body)
		resp.Body.Close()
		if err != nil {
			slog.Error("Failed to write to backend:",
				slog.String("ID", id), ilog.Err(err))
			return
		}
		if debugLogs {
			slog.Info("Wrote to backend",
				slog.String("ID", id), slog.Int64("ByteCount", n))
		}
	}
}
// handleRequest fully processes one relayed request: it forwards it to the
// local backend and then streams the (possibly chunked) response back to the
// relay server until EOF. Errors are reported to the relay as a synthetic
// 500 response so that the waiting user-client does not hang.
func (c *Client) handleRequest(remote *http.Client, local *http.Client, pbreq *pb.HttpRequest) {
	ts := time.Now()
	id := *pbreq.Id
	req, err := c.createBackendRequest(pbreq)
	if err != nil {
		c.postErrorResponse(remote, id, fmt.Sprintf("Failed to create request for backend: %v", err))
		// Without a valid backend request there is nothing more to do.
		// Returning here also prevents a nil dereference of req below.
		return
	}
	// Measure edge processing time.
	f := &tracecontext.HTTPFormat{}
	ctx := req.Context()
	var span *trace.Span
	if sctx, ok := f.SpanContextFromRequest(req); ok {
		ctx, span = trace.StartSpanWithRemoteParent(ctx, "Recv."+req.URL.Path, sctx)
	} else {
		ctx, span = trace.StartSpan(ctx, "Recv."+req.URL.Path)
	}
	addServiceName(span)
	defer span.End()
	resp, hresp, err := makeBackendRequest(ctx, local, req, id)
	if err != nil {
		// Even if we couldn't handle the backend request, send an
		// answer to the relay that signals the error.
		errorMessage := fmt.Sprintf("Backend request failed with error: %v", err)
		slog.Error("BackendRequest",
			slog.String("ID", id), slog.String("Message", errorMessage))
		c.postErrorResponse(remote, id, errorMessage)
		return
	}
	if *resp.StatusCode == http.StatusSwitchingProtocols {
		// A 101 Switching Protocols response means that the request will be
		// used for bidirectional streaming, so start a goroutine to stream
		// from client to backend.
		bodyWriter, ok := hresp.Body.(io.WriteCloser)
		if !ok {
			slog.Warn("Error: 101 Switching Protocols response with non-writable body.")
			slog.Warn(" This occurs when using Go <1.12 or when http.Client.Timeout > 0.")
			c.postErrorResponse(remote, id, "Backend returned 101 Switching Protocols, which is not supported.")
			return
		}
		// Stream stdin from remote to backend
		go c.streamToBackend(remote, id, bodyWriter)
	} else {
		// `streamToBackend` will close `hresp.Body` but it is only called on websocket connections.
		// We need to close it here for http connections.
		defer hresp.Body.Close()
	}
	ctx, respChSpan := trace.StartSpan(ctx, "Building (chunked) response channel")
	addServiceName(respChSpan)
	bodyChannel := make(chan []byte)
	responseChannel := make(chan *pb.HttpResponse)
	// Stream stdout from backend to bodyChannel
	go c.streamBytes(*resp.Id, hresp.Body, bodyChannel)
	// collect data from bodyChannel and send to remote (relay-server)
	go c.buildResponses(bodyChannel, resp, responseChannel)
	respChSpan.End()
	exponentialBackoff := backoff.ExponentialBackOff{
		InitialInterval:     time.Second,
		RandomizationFactor: 0,
		Multiplier:          2,
		MaxInterval:         10 * time.Second,
		MaxElapsedTime:      0,
		Clock:               backoff.SystemClock,
	}
	// This call here blocks until all data from the bodyChannel has been read.
	for resp := range responseChannel {
		_, respCh := trace.StartSpan(ctx, "Sending response from channel")
		addServiceName(respCh)
		// Q(hauke): do we really need exponential backoff in the relay?
		exponentialBackoff.Reset()
		err := backoff.RetryNotify(
			func() error {
				// NOTE(review): on a retry this appends the trailers again,
				// which would duplicate them in the posted response — verify
				// whether trailers can be non-empty before the final chunk.
				if len(hresp.Trailer) > 0 {
					slog.Info("Trailers",
						slog.String("ID", *resp.Id),
						slog.String("Trailer", fmt.Sprintf("%+v", hresp.Trailer)))
					resp.Trailer = append(resp.Trailer, marshalHeader(&hresp.Trailer)...)
				}
				if resp.Eof != nil && *resp.Eof {
					// Only the final chunk carries the total backend duration.
					duration := timeSince(ts)
					resp.BackendDurationMs = proto.Int64(duration.Milliseconds())
					// see makeBackendRequest()
					urlPath := strings.TrimPrefix(*pbreq.Url, "http://invalid")
					slog.Debug("Backend request",
						slog.String("ID", *resp.Id),
						slog.Float64("Duration", duration.Seconds()),
						slog.String("Path", urlPath))
				}
				return c.postResponse(remote, resp)
			},
			backoff.WithMaxRetries(&exponentialBackoff, 10),
			func(err error, _ time.Duration) {
				slog.Error("Failed to post response to relay",
					slog.String("ID", *resp.Id), ilog.Err(err))
			},
		)
		// End the span per iteration instead of deferring it: deferred calls
		// would accumulate until the whole response stream is finished.
		respCh.End()
		// Any error suggests the request should be aborted.
		// A missing chunk will cause clients to receive corrupted data, in most cases it is better
		// to close the connection to avoid that.
		if err != nil {
			slog.Error("Closing backend connection",
				slog.String("ID", *resp.Id), ilog.Err(err))
			// This is also closed in streamToBackend.
			// Closing here too to ensure the disconnect propagates faster.
			hresp.Body.Close()
			// Drain the response channel to avoid blocking buildResponses.
			for range responseChannel {
			}
			break
		}
	}
}
// localProxy pulls one pending request from the relay server (retrying
// transient failures with exponential backoff) and dispatches it to the
// backend in a new goroutine. A relay-side poll timeout is returned as
// ErrTimeout without retrying; a 403 terminates the process.
func (c *Client) localProxy(remote, local *http.Client) error {
	// Read pending request from the relay-server.
	relayURL := c.buildRelayURL()
	var req *pb.HttpRequest
	bo := backoff.ExponentialBackOff{
		InitialInterval:     100 * time.Millisecond,
		RandomizationFactor: 0,
		Multiplier:          1.5,
		MaxInterval:         10 * time.Second,
		MaxElapsedTime:      60 * time.Second,
		Clock:               backoff.SystemClock,
	}
	operation := func() error {
		r, err := c.getRequest(remote, relayURL)
		if errors.Is(err, ErrTimeout) {
			// An idle poll timing out is expected; don't retry it here.
			return backoff.Permanent(err)
		}
		req = r
		return err
	}
	notify := func(err error, _ time.Duration) {
		if err == nil {
			return
		}
		if errors.Is(err, ErrForbidden) {
			slog.Error("failed to authenticate to cloud-api, restarting", ilog.Err(err))
			os.Exit(1)
		}
		if errors.Is(err, syscall.ECONNREFUSED) {
			slog.Warn("Failed to connect to relay server. Retrying.")
		}
	}
	if err := backoff.RetryNotify(operation, &bo, notify); err != nil {
		return err
	}
	// Forward the request to the backend.
	go c.handleRequest(remote, local, req)
	return nil
}
// localProxyWorker runs localProxy in an endless loop, sleeping for a second
// after any unexpected failure. Poll timeouts (ErrTimeout) are normal and do
// not trigger the sleep.
func (c *Client) localProxyWorker(remote, local *http.Client) {
	slog.Info("Starting to relay server request loop", slog.String("ServerName", c.config.ServerName))
	for {
		if err := c.localProxy(remote, local); err != nil && !errors.Is(err, ErrTimeout) {
			slog.Error("localProxy", ilog.Err(err))
			time.Sleep(1 * time.Second)
		}
	}
}
// buildRelayURL constructs the URL used to poll the relay server for the next
// pending request addressed to this client's server name.
func (c *Client) buildRelayURL() string {
	params := url.Values{}
	params.Add("server", c.config.ServerName)
	u := url.URL{
		Scheme:   c.config.RelayScheme,
		Host:     c.config.RelayAddress,
		Path:     c.config.RelayPrefix + "/server/request",
		RawQuery: params.Encode(),
	}
	return u.String()
}
================================================
FILE: src/go/cmd/http-relay-client/client/client_test.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"net/http"
"testing"
"time"
pb "github.com/googlecloudrobotics/core/src/proto/http-relay"
. "github.com/onsi/gomega"
"google.golang.org/protobuf/proto"
"gopkg.in/h2non/gock.v1"
)
// assertMocksDoneWithin polls gock until every registered mock has been
// consumed or the deadline d expires; any mock still pending afterwards is
// reported as a test error.
func assertMocksDoneWithin(t *testing.T, d time.Duration) {
	deadline := time.Now().Add(d)
	for time.Now().Before(deadline) {
		if gock.IsDone() {
			return
		}
		time.Sleep(time.Millisecond)
	}
	for _, pending := range gock.Pending() {
		t.Errorf("mock still pending after %s: %v", d, pending.Request().URLStruct)
	}
}
// TestAssertMocksDoneWithin_SucceedsWhenMocksAreDone checks that the helper
// reports no error when no gock mocks are outstanding.
func TestAssertMocksDoneWithin_SucceedsWhenMocksAreDone(t *testing.T) {
	assertMocksDoneWithin(t, time.Millisecond)
}
// TestAssertMocksDoneWithin_FailsWhenMocksNotDone checks that outstanding
// mocks are flagged, using a throwaway testing.T to capture the failure.
func TestAssertMocksDoneWithin_FailsWhenMocksNotDone(t *testing.T) {
	defer gock.Off()
	gock.New("https://localhost:8081")
	proxyT := &testing.T{}
	assertMocksDoneWithin(proxyT, time.Millisecond)
	if !proxyT.Failed() {
		t.Errorf("assertMocksDoneWithin didn't trigger an error despite outstanding mocks")
	}
}
// TestLocalProxy exercises the happy path: a request is pulled from the
// relay server, forwarded to the backend, and the backend's 201 response is
// posted back to the relay server.
func TestLocalProxy(t *testing.T) {
	// Hot patch: gock refuses to match bodies with unknown content-types by default.
	gock.BodyTypes = append(gock.BodyTypes, "application/vnd.google.protobuf;proto=cloudrobotics.http_relay.v1alpha1.HttpResponse")
	defer gock.Off()
	// We expect the response below to always contain 0 milliseconds.
	timeSince = func(t time.Time) time.Duration { return 0 * time.Millisecond }
	// The pending request the relay server hands to the client.
	req, _ := proto.Marshal(&pb.HttpRequest{
		Id:     proto.String("15"),
		Method: proto.String("GET"),
		Url:    proto.String("http://invalid/foo/bar?a=b"),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com")}},
		Body: []byte("thebody"),
	})
	// The exact response proto the client is expected to post back.
	resp, _ := proto.Marshal(&pb.HttpResponse{
		Id:         proto.String("15"),
		StatusCode: proto.Int32(201),
		Header: []*pb.HttpHeader{
			{
				Name:  proto.String("Priority"),
				Value: proto.String("High"),
			},
		},
		Body:              []byte("theresponsebody"),
		Eof:               proto.Bool(true),
		BackendDurationMs: proto.Int64(0),
	})
	// Mock: relay server hands out the pending request.
	gock.New("https://localhost:8081").
		Get("/server/request").
		MatchParam("server", "foo").
		Reply(200).
		BodyString(string(req))
	// Mock: the backend answers with 201 and a header.
	gock.New("https://localhost:8080").
		Get("/foo/bar").
		MatchParam("a", "b").
		MatchHeader("X-GFE", "google.com").
		BodyString("thebody").
		Reply(201).
		SetHeader("Priority", "High").
		BodyString("theresponsebody")
	// Mock: relay server accepts the posted response.
	gock.New("https://localhost:8081").
		Post("/server/response").
		Body(bytes.NewReader(resp)).
		Reply(200)
	config := DefaultClientConfig()
	config.ServerName = "foo"
	client := NewClient(config)
	err := client.localProxy(&http.Client{}, &http.Client{})
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	// handleRequest runs asynchronously, so wait for the mocks to be consumed.
	assertMocksDoneWithin(t, 10*time.Second)
}
// TestBackendError checks that a backend error status (400) is relayed back
// to the relay server unchanged rather than being swallowed.
func TestBackendError(t *testing.T) {
	// Hot patch: gock refuses to match bodies with unknown content-types by default.
	gock.BodyTypes = append(gock.BodyTypes, "application/vnd.google.protobuf;proto=cloudrobotics.http_relay.v1alpha1.HttpResponse")
	defer gock.Off()
	// We expect the response below to always contain 0 milliseconds.
	timeSince = func(t time.Time) time.Duration { return 0 * time.Millisecond }
	// The pending request on the relay-server side.
	req, _ := proto.Marshal(&pb.HttpRequest{
		Id:     proto.String("15"),
		Method: proto.String("GET"),
		Url:    proto.String("http://invalid/foo/bar?a=b"),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com")}},
		Body: []byte("thebody"),
	})
	// The response proto the client should post: backend's 400 passed through.
	resp, _ := proto.Marshal(&pb.HttpResponse{
		Id:                proto.String("15"),
		StatusCode:        proto.Int32(400),
		Body:              []byte("theresponsebody"),
		Eof:               proto.Bool(true),
		BackendDurationMs: proto.Int64(0),
	})
	relayServerAddress := "https://localhost:8081"
	backendServerAddress := "https://localhost:8080"
	// Mocks the response from the relay server from which we are getting
	// the initial data.
	gock.New(relayServerAddress).
		Get("/server/request").
		MatchParam("server", "foo").
		Reply(200).
		BodyString(string(req))
	// Mocks the response from the backend server to which we relayed data.
	gock.New(backendServerAddress).
		Get("/foo/bar").
		MatchParam("a", "b").
		MatchHeader("X-GFE", "google.com").
		BodyString("thebody").
		Reply(400).
		BodyString("theresponsebody")
	// Mocks the response from the relay-server after having received the
	// actual backend response.
	gock.New(relayServerAddress).
		Post("/server/response").
		Body(bytes.NewReader(resp)).
		Reply(200)
	config := DefaultClientConfig()
	config.ServerName = "foo"
	client := NewClient(config)
	// localProxy ...
	// 1. pulls a request from the relay-server (/server/request)
	// 2. sends that request to the backend server (here localhost:8080/foo/bar?a=b)
	// 3. retrieves the response from the backend and sends it to the relay-server
	err := client.localProxy(&http.Client{}, &http.Client{})
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	assertMocksDoneWithin(t, 10*time.Second)
}
// TestServerTimeout checks that a 408 from the relay server's /server/request
// endpoint surfaces as ErrTimeout (and is not retried with backoff).
func TestServerTimeout(t *testing.T) {
	// Hot patch: gock refuses to match bodies with application/octet-data
	// by default.
	gock.BodyTypes = append(gock.BodyTypes, "application/octet-data")
	defer gock.Off()
	req, _ := proto.Marshal(&pb.HttpRequest{
		Id:     proto.String("15"),
		Method: proto.String("GET"),
		Url:    proto.String("http://invalid/foo/bar?a=b"),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com")}},
		Body: []byte("thebody"),
	})
	// The relay server replies 408: no request was pending within the poll window.
	gock.New("https://localhost:8081").
		Get("/server/request").
		MatchParam("server", "foo").
		Reply(408).
		BodyString(string(req))
	config := DefaultClientConfig()
	config.ServerName = "foo"
	client := NewClient(config)
	err := client.localProxy(&http.Client{}, &http.Client{})
	if err != ErrTimeout {
		t.Errorf("Unexpected error: %v", err)
	}
	assertMocksDoneWithin(t, 10*time.Second)
}
// TestBuildResponsesTimesOut drives buildResponses through its three flush
// cases: a first chunk carrying status+body, a follow-up chunk carrying only
// the id and new body bytes, and a final empty EOF chunk after the input
// channel closes.
func TestBuildResponsesTimesOut(t *testing.T) {
	g := NewGomegaWithT(t)
	bodyChannel := make(chan []byte)
	responseChannel := make(chan *pb.HttpResponse)
	resp := &pb.HttpResponse{
		Id:         proto.String("20"),
		StatusCode: proto.Int32(200),
	}
	config := DefaultClientConfig()
	// Short timeout so each chunk is flushed quickly by the timer path.
	config.BackendResponseTimeout = 10 * time.Millisecond
	client := NewClient(config)
	go client.buildResponses(bodyChannel, resp, responseChannel)
	bodyChannel <- []byte("foo")
	// First flush carries the status code along with the body.
	resp = <-responseChannel
	g.Expect(*resp.Id).To(Equal("20"))
	g.Expect(*resp.StatusCode).To(Equal(int32(200)))
	g.Expect(string(resp.Body)).To(Equal("foo"))
	g.Expect(resp.Eof).To(BeNil())
	bodyChannel <- []byte("bar")
	// Follow-up flushes carry only the id and the new body bytes.
	resp = <-responseChannel
	g.Expect(*resp.Id).To(Equal("20"))
	g.Expect(resp.StatusCode).To(BeNil())
	g.Expect(string(resp.Body)).To(Equal("bar"))
	g.Expect(resp.Eof).To(BeNil())
	close(bodyChannel)
	// Closing the input produces a final, empty chunk flagged Eof.
	resp = <-responseChannel
	g.Expect(*resp.Id).To(Equal("20"))
	g.Expect(resp.StatusCode).To(BeNil())
	g.Expect(string(resp.Body)).To(Equal(""))
	g.Expect(*resp.Eof).To(Equal(true))
}
================================================
FILE: src/go/cmd/http-relay-client/main.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main runs a local HTTP relay client.
//
// See the documentation of ../http-relay-server/main.go for details about
// the system architecture. In a nutshell, this program pulls serialized HTTP
// requests from a remote relay server, redirects them to a local backend, and
// posts the serialized response to the relay server.
package main
import (
"flag"
"fmt"
"log/slog"
"net/http"
_ "net/http/pprof"
"os"
"contrib.go.opencensus.io/exporter/stackdriver"
"github.com/googlecloudrobotics/core/src/go/cmd/http-relay-client/client"
"github.com/googlecloudrobotics/ilog"
"go.opencensus.io/trace"
)
var (
	// config holds the client configuration, populated from command line flags.
	config client.ClientConfig
	// stackdriverProjectID enables trace export to this GCP project when non-empty.
	stackdriverProjectID string
	// logLevel is the minimum slog level that will be logged.
	logLevel int
	// pprofPort serves pprof endpoints on this port when non-zero.
	pprofPort int
)
// init registers all command line flags, binding them directly to the fields
// of the client configuration.
func init() {
	config = client.DefaultClientConfig()
	// We set the default values for all command line flags to be equal to the
	// values in the default client config to ensure consistency between the two.
	flag.StringVar(&config.BackendScheme, "backend_scheme", config.BackendScheme,
		"Connection scheme (http, https) for connection from relay "+
			"client to backend server")
	flag.StringVar(&config.BackendAddress, "backend_address", config.BackendAddress,
		"Hostname of the backend server as seen by the relay client")
	flag.StringVar(&config.BackendPath, "backend_path", config.BackendPath,
		"Path prefix for backend requests (default: none)")
	flag.BoolVar(&config.PreserveHost, "preserve_host", config.PreserveHost,
		"Preserve Host header of the original request for "+
			"compatibility with cross-origin request checks.")
	flag.StringVar(&config.RelayScheme, "relay_scheme", config.RelayScheme,
		"Connection scheme (http, https) for connection from relay "+
			"client to relay server")
	flag.StringVar(&config.RelayAddress, "relay_address", config.RelayAddress,
		"Hostname of the relay server as seen by the relay client")
	flag.StringVar(&config.RelayPrefix, "relay_prefix", config.RelayPrefix,
		"Path prefix for the relay server")
	flag.StringVar(&config.ServerName, "server_name", config.ServerName,
		"Fetch requests from the relay server for this server name")
	flag.StringVar(&config.AuthenticationTokenFile, "authentication_token_file", config.AuthenticationTokenFile,
		"File with authentication token for backend requests")
	flag.StringVar(&config.RootCAFile, "root_ca_file", config.RootCAFile,
		"File with root CA cert for SSL")
	flag.IntVar(&config.MaxChunkSize, "max_chunk_size", config.MaxChunkSize,
		"Max size of data in bytes to accumulate before sending to the peer")
	flag.IntVar(&config.BlockSize, "block_size", config.BlockSize,
		"Size of i/o buffer in bytes")
	flag.IntVar(&config.NumPendingRequests, "num_pending_requests", config.NumPendingRequests,
		"Number of pending http requests to the relay")
	flag.IntVar(&config.MaxIdleConnsPerHost, "max_idle_conns_per_host", config.MaxIdleConnsPerHost,
		"The maximum number of idle (keep-alive) connections to keep per-host")
	flag.BoolVar(&config.DisableHttp2, "disable_http2", config.DisableHttp2,
		"Disable http2 protocol usage (e.g. for channels that use special streaming protocols such as SPDY).")
	flag.BoolVar(&config.ForceHttp2, "force_http2", config.ForceHttp2,
		"Force enable http2 protocol usage through the use of go's http2 transport (e.g. when relaying grpc).")
	flag.BoolVar(&config.DisableAuthForRemote, "disable_auth_for_remote", config.DisableAuthForRemote,
		"Disable auth when talking to the relay server for local testing.")
	// The stackdriver project ID is a client independent variable and so we
	// initialize it independently.
	flag.StringVar(&stackdriverProjectID, "trace-stackdriver-project-id", "",
		"If not empty, traces will be uploaded to this Google Cloud Project.")
	flag.IntVar(&logLevel, "log_level", int(slog.LevelInfo),
		"the log message level required to be logged")
	flag.IntVar(&pprofPort, "pprof_port", 0, "If non-zero, serves pprof endpoints on this port.")
}
func main() {
flag.Parse()
if pprofPort != 0 {
go func() {
slog.Info("Starting pprof server", slog.Int("port", pprofPort))
err := http.ListenAndServe(fmt.Sprintf(":%d", pprofPort), nil)
slog.Error("pprof server failed", ilog.Err(err))
}()
}
logHandler := ilog.NewLogHandler(slog.Level(logLevel), os.Stderr)
slog.SetDefault(slog.New(logHandler))
if stackdriverProjectID != "" {
sd, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: stackdriverProjectID,
})
if err != nil {
slog.Error("Failed to create the Stackdriver exporter", slog.String("Project", stackdriverProjectID), ilog.Err(err))
os.Exit(1)
} else {
trace.RegisterExporter(sd)
defer sd.Flush()
}
}
client := client.NewClient(config)
client.Start()
}
================================================
FILE: src/go/cmd/http-relay-server/BUILD.bazel
================================================
# Build rules for the http-relay-server binary and its container image.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])

# Library wrapping main.go; the server logic itself lives in the
# //src/go/cmd/http-relay-server/server package.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/http-relay-server",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/cmd/http-relay-server/server:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@io_opencensus_go_contrib_exporter_stackdriver//:go_default_library",
    ],
)

# The relay server executable.
go_binary(
    name = "http-relay-server-app",
    embed = [":go_default_library"],
)

# Single-layer tarball holding the binary, consumed by the image rule below.
pkg_tar(
    name = "http-relay-server-image-layer",
    srcs = [":http-relay-server-app"],
    extension = "tar.gz",
)

# Distroless container image whose entrypoint is the relay server binary.
oci_image(
    name = "http-relay-server-image",
    base = "@distroless_base",
    entrypoint = ["/http-relay-server-app"],
    tars = [":http-relay-server-image-layer"],
)
================================================
FILE: src/go/cmd/http-relay-server/README.md
================================================
# HTTP Relay Server
The http-relay-server multiplexes HTTP requests between user-clients and backends (robots) via a relay-client. It exists to make HTTP endpoints on robots accessible without requiring a public endpoint on the robot itself.
## How it works
It binds to a public endpoint accessible by both user-client and backend, and works together with a relay-client that's colocated with the backend. This allows multiple backends to be accessible through a single relay-server instance, and supports multiple concurrent user-clients.
```mermaid
flowchart LR
subgraph "LAN (User)"
user-client
end
subgraph Internet
relay-server
end
subgraph "LAN (Robot)"
relay-client
backend
end
user-client -->|HTTP Request| relay-server
%% Workaround https://github.com/mermaid-js/mermaid/issues/3208
relay-server ~~~ relay-client -->|Poll | relay-server
relay-client ~~~ relay-server
relay-client -->|Forward Request| backend
```
The relay-server is multiplexing: It allows multiple relay-clients to
connect under unique names, each handling requests for a subpath of `/client`.
Alternatively (e.g. for gRPC connections) the backend can be selected by
omitting the client prefix and passing an `X-Server-Name` header.
### Sequence of operations
1. User-client makes request on `/client/$foo/$request`.
2. Relay-server assigns an ID and stores request (with path `$request`) in
memory. It keeps the user-client's request pending.
3. Relay-client requests `/server/request?server=$foo`.
4. Relay-server responds with stored request (or timeout if no request comes
in within the next 30 sec).
5. Relay-client makes the stored request to backend.
6. Backend replies.
7. Relay-client posts backend's reply to `/server/response`.
8. Relay-server responds to client's request with backend's reply.
For some requests (e.g. `kubectl exec`), the backend responds with
`101 Switching Protocols`, resulting in the following operations:
1. Relay-server responds to client's request with backend's 101 reply.
2. User-client sends bytes from stdin to the relay-server.
3. Relay-client requests `/server/requeststream?id=$id`.
4. Relay-server responds with stdin bytes from client.
5. Relay-client sends stdin bytes to backend.
6. Backend sends stdout bytes to relay-client.
7. Relay-client posts stdout bytes to `/server/response`.
8. Relay-server sends stdout bytes to the client.
This simplified graphic shows the back-and-forth for an `exec` request:
```mermaid
sequenceDiagram
participant user-client as user-client (kubectl exec)
participant relay-server
participant relay-client
participant backend as backend (k8s apiserver)
user-client->>relay-server: POST /exec
relay-client->>relay-server: GET /request
relay-server-->>relay-client: exec
relay-client->>backend: POST /exec
activate backend
backend-->>relay-client: 101 Switching Protocols
relay-client->>relay-server: POST /response (101)
relay-server-->>user-client: 101 Switching Protocols
user-client->>relay-server: stdin
relay-client->>relay-server: POST /requeststream?id=$id
relay-server-->>relay-client: stdin
relay-client->>backend: stdin
backend-->>relay-client: stdout
deactivate backend
relay-client->>relay-server: POST /response (stdout)
relay-server-->>user-client: stdout
```
The relay-client side implementation is in `../http-relay-client`.
## Tested capabilities
The http-relay-server was originally designed as a way to use kubectl against remote clusters.
It traverses firewalls by only making outbound requests to the public internet from both the user client (e.g. kubectl, browser) and the remote cluster.
It has been tested with the following traffic:
- HTTP 1.1 & 2 from web browsers (including bidirectional streaming with websockets)
- HTTP 1.1 from kubectl, including streaming response bodies for `kubectl logs`
- SPDY from kubectl (via HTTP 101 Switching Protocols) for `kubectl exec`
- unidirectional gRPC (HTTP2 cleartext and HTTP trailers)
The following is known not to work:
- streaming gRPC fails with `rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR`, root cause unknown
The following has not been tested:
- HTTP 1.1 streaming request body (`Transfer-Encoding: chunked` in the request header)
## Flags
* `--port`: Port number to listen on (default: 80).
* `--block_size`: Size of i/o buffer in bytes (default: 10240).
* `--inactive_request_timeout`: Timeout for inactive requests (default: 60s). In particular, this sets a limit on how long the backend can wait before writing headers and the response status.
## Configuration
### Nginx Timeout
If you are running the relay server behind Nginx, ensure that the proxy read timeout on Nginx is set such that Nginx doesn't time out before the http-relay-server does.
Specifically, the `nginx.ingress.kubernetes.io/proxy-read-timeout` annotation (or `proxy_read_timeout` directive in nginx config) should be set to a value larger than `--inactive_request_timeout`.
For example, if `--inactive_request_timeout` is set to `60s`, you might set `nginx.ingress.kubernetes.io/proxy-read-timeout` to `75s`.
================================================
FILE: src/go/cmd/http-relay-server/main.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main runs a multiplexing HTTP relay server.
//
// It exists to make HTTP endpoints on robots accessible without a public
// endpoint. It binds to a public endpoint accessible by both user-client and
// backend and works together with a relay-client that's colocated with the
// backend.
//
// For more details, see README.md.
package main
import (
"flag"
"log/slog"
"os"
"contrib.go.opencensus.io/exporter/stackdriver"
"github.com/googlecloudrobotics/core/src/go/cmd/http-relay-server/server"
"github.com/googlecloudrobotics/ilog"
"go.opencensus.io/trace"
)
// Command-line flags for the relay server. Defaults come from the server
// package so the binary and the library stay in sync.
var (
	port      = flag.Int("port", server.DefaultPort, "Port number to listen on")
	blockSize = flag.Int("block_size", server.DefaultBlockSize,
		"Size of i/o buffer in bytes")
	// If set, OpenCensus traces are exported to Stackdriver in this project.
	stackdriverProjectID = flag.String("trace-stackdriver-project-id", "",
		"If not empty, traces will be uploaded to this Google Cloud Project.")
	logLevel = flag.Int("log_level", int(slog.LevelInfo),
		"the log message level required to be logged")
	inactiveRequestTimeout = flag.Duration("inactive_request_timeout", server.DefaultInactiveRequestTimeout,
		"Timeout for inactive requests. In particular, this sets a limit on how long the backend can wait before writing headers and the response status.")
)
// main parses flags, configures structured logging and (optionally) trace
// exporting, then runs the relay server forever.
func main() {
	flag.Parse()
	slog.SetDefault(slog.New(ilog.NewLogHandler(slog.Level(*logLevel), os.Stderr)))
	if *stackdriverProjectID != "" {
		exporter, err := stackdriver.NewExporter(stackdriver.Options{
			ProjectID: *stackdriverProjectID,
		})
		if err != nil {
			slog.Error("Failed to create the Stackdriver exporter", slog.String("Project", *stackdriverProjectID), ilog.Err(err))
			os.Exit(1)
		}
		trace.RegisterExporter(exporter)
		// Flush any buffered trace spans when main returns.
		defer exporter.Flush()
	}
	relay := server.NewServer(server.Config{
		Port:                   *port,
		BlockSize:              *blockSize,
		InactiveRequestTimeout: *inactiveRequestTimeout,
	})
	relay.Start()
}
================================================
FILE: src/go/cmd/http-relay-server/server/BUILD.bazel
================================================
# Build rules for the http-relay-server library (broker + HTTP server) and
# its unit tests.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "go_default_library",
    srcs = [
        "broker.go",
        "server.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/http-relay-server/server",
    deps = [
        "//src/proto/http-relay:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library",
        "@io_opencensus_go//plugin/ochttp:go_default_library",
        "@io_opencensus_go//plugin/ochttp/propagation/tracecontext:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_net//http2:go_default_library",
        "@org_golang_x_net//http2/h2c:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)

# Unit tests run against the library's internals (embed, not deps).
go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "broker_test.go",
        "server_test.go",
    ],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
    deps = [
        "//src/proto/http-relay:go_default_library",
        "@com_github_getlantern_httptest//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
================================================
FILE: src/go/cmd/http-relay-server/server/broker.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"fmt"
"log/slog"
"net/url"
"strings"
"sync"
"time"
pb "github.com/googlecloudrobotics/core/src/proto/http-relay"
"github.com/prometheus/client_golang/prometheus"
)
// Prometheus collectors for broker traffic; all are registered in init().
var (
	// Count of requests entering the broker, by method and backend.
	brokerRequests = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "broker_requests",
			Help: "Number of requests to the broker",
		},
		[]string{"method", "backend"},
	)
	// Count of responses leaving the broker; "result" distinguishes
	// outcomes such as "ok" and "timeout".
	brokerResponses = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "broker_responses",
			Help: "Number of responses from the broker",
		},
		[]string{"method", "result", "backend"},
	)
	// End-to-end latency from relayed request to final response.
	brokerResponseDurations = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "broker_responses_durations",
			Help: "Time from request to final response in s",
		},
		[]string{"method", "backend"},
	)
	// Latency as reported by the backend itself (BackendDurationMs).
	brokerBackendResponseDurations = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "broker_backend_responses_durations",
			Help: "Time from backend request to final response in s",
		},
		[]string{"method", "backend"},
	)
	// Difference between end-to-end and backend latency, i.e. relay overhead.
	brokerOverheadDurations = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "broker_overhead_durations",
			Help: "Extra time spend between relay server and client in s",
		},
		[]string{"method", "backend"},
	)
)
// init registers the broker's Prometheus collectors once at package load
// time; MustRegister panics on duplicate registration.
func init() {
	prometheus.MustRegister(
		brokerRequests,
		brokerResponses,
		brokerResponseDurations,
		brokerBackendResponseDurations,
		brokerOverheadDurations,
	)
}
// pendingResponse tracks one in-flight relayed request from the moment the
// relay client picks it up until the final (EOF) response is delivered or
// the request is reaped for inactivity.
type pendingResponse struct {
	// This channel is used to communicate data between the backend and user-client for
	// bidirectional streaming connections.
	requestStream chan []byte
	// This mutex should be locked when writing to `requestStream`.
	requestStreamMutex sync.Mutex
	// This channel is used to communicate data between the backend and user-client.
	// The user-client sends a hanging request to the relay-server which blocks until
	// data is received on the response channel.
	responseStream chan *pb.HttpResponse
	// This mutex should be locked when writing to `responseStream`.
	sendMutex sync.Mutex
	// Timestamp of the last response chunk; ReapInactiveRequests compares
	// it against its threshold.
	lastActivity time.Time
	// For diagnostics only.
	startTime   time.Time
	requestPath string
	// mark that the connection should be dropped; closed (never written to)
	// by ReapInactiveRequests to unblock pending senders.
	markReap chan struct{}
}
// RelayClientUnavailableError indicates that no relay client with the given
// name has ever connected to this relay server.
type RelayClientUnavailableError struct {
	client string
}

// Error renders the user-facing message naming the unreachable client.
func (e *RelayClientUnavailableError) Error() string {
	msg := fmt.Sprintf("Cannot reach the client %q. Check that it's turned on, set up, and connected to the internet. (unknown client)", e.client)
	return msg
}
// broker implements a thread-safe map for the request and response queues.
// Requests (req) are mapped by server-name. There is only one channel per relay-
// client (identified by the server query parameter).
// Responses (resp) are mapped by stream id (randomly generated hex string).
// There can be multiple concurrent transfers per relay-client, each identified
// by a unique id query parameter.
type broker struct {
	// m guards both maps below; it is never held while blocking on a channel.
	m    sync.Mutex
	req  map[string]chan *pb.HttpRequest
	resp map[string]*pendingResponse
}
// newBroker constructs an empty broker with initialized (non-nil) queues.
func newBroker() *broker {
	return &broker{
		req:  make(map[string]chan *pb.HttpRequest),
		resp: make(map[string]*pendingResponse),
	}
}
// Healthy can be used for server health checks. If the server is deadlocked it
// will block forever.
func (r *broker) Healthy() error {
	// Taking and releasing the broker lock proves the broker is responsive.
	r.m.Lock()
	r.m.Unlock()
	return nil
}
// RelayRequest matches a pending relay client's request to the encapsulated
// request and returns a channel for the results.
//
// It fails fast when the named relay client has never polled this server or
// when the request ID is already in use, and times out after 10 seconds if
// the relay client does not accept the request.
func (r *broker) RelayRequest(server string, request *pb.HttpRequest) (<-chan *pb.HttpResponse, error) {
	id := *request.Id
	targetUrl, err := url.Parse(*request.Url)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse URL: %v", err)
	}
	r.m.Lock()
	if r.req[server] == nil {
		// If we haven't seen this relay client before, immediately return error.
		r.m.Unlock()
		return nil, &RelayClientUnavailableError{client: server}
	}
	if r.resp[id] != nil {
		r.m.Unlock()
		return nil, fmt.Errorf("Multiple clients trying to handle request ID %s on server %s", id, server)
	}
	ts := time.Now()
	r.resp[id] = &pendingResponse{
		requestStream:  make(chan []byte),
		responseStream: make(chan *pb.HttpResponse),
		lastActivity:   ts,
		startTime:      ts,
		requestPath:    targetUrl.Path,
		markReap:       make(chan struct{}),
	}
	// Capture both channels while holding the lock; the maps must not be
	// read after the lock is released.
	reqChan := r.req[server]
	respChan := r.resp[id].responseStream
	r.m.Unlock()
	slog.Info("Enqueuing request", slog.String("ID", id))
	brokerRequests.WithLabelValues("client", server).Inc()
	select {
	// This blocks until we get a free spot in the broker's request channel.
	case reqChan <- request:
		return respChan, nil
	case <-time.After(10 * time.Second):
		// This branch is triggered if the channel is not ready to consume the request
		// since it is still busy with handling a different request.
		return nil, fmt.Errorf("Cannot reach the client %q. Check that it's turned on, set up, and connected to the internet. If the network config recently changed, try again in 1-2 minutes. (timeout waiting for relay client to accept request)", server)
	}
}
// StopRelayRequest forgets a relaying request, this causes the next chunk from the backend
// with the relay id to not be recognized, resulting in the relay server returning an error.
func (r *broker) StopRelayRequest(requestId string) {
	r.m.Lock()
	defer r.m.Unlock()
	// Deleting the entry is sufficient: SendResponse rejects unknown IDs.
	delete(r.resp, requestId)
}
// GetRequest obtains a client's request for the server identifier. It blocks
// until a client makes a request.
//
// It returns an error after 30 seconds with no request (so the relay client
// can re-poll) or when ctx is cancelled (server shutdown).
// NOTE(review): the `path` parameter is currently unused in this function.
func (r *broker) GetRequest(ctx context.Context, server, path string) (*pb.HttpRequest, error) {
	r.m.Lock()
	if r.req[server] == nil {
		// This happens when the relay-server started and a client connects before
		// the relay-client connected.
		r.req[server] = make(chan *pb.HttpRequest)
	}
	reqChan := r.req[server]
	r.m.Unlock()
	brokerRequests.WithLabelValues("server_request", server).Inc()
	select {
	case req := <-reqChan:
		brokerResponses.WithLabelValues("server_request", "ok", server).Inc()
		return req, nil
	case <-time.After(time.Second * 30):
		brokerResponses.WithLabelValues("server_request", "timeout", server).Inc()
		return nil, fmt.Errorf("No request received within timeout")
	case <-ctx.Done():
		return nil, fmt.Errorf("Server is restarting")
	}
}
// GetRequestStream gets data from the stream that follows a client's HTTP
// request. For example, when using `kubectl exec` this passes stdin data from
// the broker to the relay client.
// If no ongoing request matches the given ID, this returns ok=false.
//
// After 30 seconds with no data it returns an empty (non-nil) slice with
// ok=true, letting the relay client re-poll.
func (r *broker) GetRequestStream(id string) ([]byte, bool) {
	r.m.Lock()
	pr := r.resp[id]
	r.m.Unlock()
	if pr == nil {
		return nil, false
	}
	select {
	case data := <-pr.requestStream:
		return data, true
	case <-time.After(time.Second * 30):
		return []byte{}, true
	}
}
// PutRequestStream adds data from the stream that follows a client's HTTP
// request. For example, when using `kubectl exec` this passes stdin data from
// kubectl to the broker.
// If no ongoing request matches the given ID, this returns ok=false.
func (r *broker) PutRequestStream(id string, data []byte) bool {
	r.m.Lock()
	pr := r.resp[id]
	if pr == nil {
		r.m.Unlock()
		return false
	}
	// Acquire requestStreamMutex while still holding r.m so that
	// ReapInactiveRequests cannot close requestStream between releasing r.m
	// and starting the send below.
	pr.requestStreamMutex.Lock()
	defer pr.requestStreamMutex.Unlock()
	r.m.Unlock()
	select {
	case pr.requestStream <- data:
		break
	case <-pr.markReap:
		// The request was reaped for inactivity; the data is dropped but the
		// ID was known, so still report success.
		slog.Error("Error sending user client request to backend (Closed due to inactivity)", slog.String("ID", id))
		return true
	}
	return true
}
// SendResponse delivers the HttpResponse to the user-client handler that created the
// request. It fails if and only if the request ID is not recognized.
//
// The final chunk of a stream is marked with Eof=true; it removes the request
// from the broker and closes the response channel.
func (r *broker) SendResponse(resp *pb.HttpResponse) error {
	id := *resp.Id
	// The ID's prefix before the first ':' is used as the backend label for
	// metrics (presumably IDs are of the form "backend:stream" — verify
	// against the ID generator).
	backendName := strings.SplitN(id, ":", 2)[0]
	r.m.Lock()
	pr := r.resp[id]
	if pr == nil {
		r.m.Unlock()
		brokerResponses.WithLabelValues("server_response", "not recognized or reaches the inactivity timeout", backendName).Inc()
		return fmt.Errorf("Duplicate or invalid request ID %s", id)
	}
	// hold `sendMutex` throughout the function to ensure that `responseStream` is not closed
	// while we are writing to it. We must acquire the lock while we are holding `r.m` to
	// avoid `ReapInactiveRequests` closing `responseStream` between the time that we
	// release `r.m` and lock `pr.sendMutex`.
	pr.sendMutex.Lock()
	defer pr.sendMutex.Unlock()
	if resp.GetEof() {
		// remove this request from the broker to prevent `ReapInactiveRequests` from processing (and closing `pr.responseStream`)
		// a request that is about to be closed.
		delete(r.resp, id)
	} else {
		pr.lastActivity = time.Now()
	}
	duration := time.Since(pr.startTime).Seconds()
	// Release the lock on the broker before we write to `responseStream` so it does not
	// block other requests.
	r.m.Unlock()
	select {
	// Writing to this channel will notify consumers which are waiting for data
	// on the channel returned by RelayRequest(). Note that the rate that we can write
	// is limited by the rate that the user client consumes the stream.
	case pr.responseStream <- resp:
		break
	case <-pr.markReap:
		return fmt.Errorf("Closed due to inactivity")
	}
	brokerRequests.WithLabelValues("server_response", backendName).Inc()
	brokerResponseDurations.WithLabelValues("server_response", backendName).Observe(duration)
	if resp.GetEof() {
		// this request is already removed from the broker earlier so `ReapInactiveRequests` will not
		// process this and attempt to close the channel twice.
		close(pr.responseStream)
		backendDuration := (time.Duration(resp.GetBackendDurationMs()) * time.Millisecond).Seconds()
		if backendDuration > 0.0 {
			brokerBackendResponseDurations.WithLabelValues("server_response", backendName).Observe(backendDuration)
			brokerOverheadDurations.WithLabelValues("server_response", backendName).Observe(duration - backendDuration)
		}
		slog.Info("Delivered final response to client", slog.String("ID", id), slog.Int("Bytes", len(resp.Body)), slog.Float64("Elapsed", duration), slog.Float64("BackendDuration", backendDuration))
	} else {
		slog.Info("Delivered response to client", slog.String("ID", id), slog.Int("Bytes", len(resp.Body)), slog.Float64("Elapsed", duration))
	}
	brokerResponses.WithLabelValues("server_response", "ok", backendName).Inc()
	return nil
}
// ReapInactiveRequests drops pending responses whose last activity precedes
// threshold, unblocking any goroutines stuck in SendResponse or
// PutRequestStream for those requests.
func (r *broker) ReapInactiveRequests(threshold time.Time) {
	r.m.Lock()
	for id, pr := range r.resp {
		if pr.lastActivity.Before(threshold) {
			slog.Info("Timeout on inactive request", slog.String("ID", id))
			// Closing `pr.markReap` tells `SendResponse` and `PutRequestStream` to stop.
			close(pr.markReap)
			// The mutex ensures no PutRequestStream is mid-send on requestStream
			// when we close it.
			pr.requestStreamMutex.Lock()
			close(pr.requestStream)
			pr.requestStreamMutex.Unlock()
			// If we block on this lock, it means `SendResponse` is writing to the channel, since we just closed
			// `markReap`, it should release the lock soon and we can safely close the channel.
			pr.sendMutex.Lock()
			close(pr.responseStream)
			pr.sendMutex.Unlock()
			// Amazingly, this is safe in Go: https://stackoverflow.com/questions/23229975/is-it-safe-to-remove-selected-keys-from-map-within-a-range-loop
			delete(r.resp, id)
		}
	}
	r.m.Unlock()
}
================================================
FILE: src/go/cmd/http-relay-server/server/broker_test.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"context"
"log/slog"
"sync"
"testing"
"time"
pb "github.com/googlecloudrobotics/core/src/proto/http-relay"
"google.golang.org/protobuf/proto"
)
const (
	// These IDs are used for testing multiple requests. The contents of the
	// strings are not important, as long as they are unique.
	idOne   = "idOne"
	idTwo   = "idTwo"
	idThree = "idThree"
	// unknownID is never registered with the broker.
	unknownID = "unknownID"
)
// brokerConn helps manage send/receive operations on the broker. Because the broker
// will immediately reject user clients for a backend which it has not seen before,
// we need to ensure that the backend side makes the connection first.
//
// Only one request id can be used for each `brokerConn`
type brokerConn struct {
	// ready is closed by bakeRequest once the backend's request channel exists.
	ready chan struct{}
}

// newBrokerConn returns a brokerConn whose ready channel is still open.
func newBrokerConn() brokerConn {
	return brokerConn{
		ready: make(chan struct{}),
	}
}
// runSender plays the user-client side: it relays one request with ID m to
// backend s and expects exactly one response whose body echoes m, followed
// by channel close.
func (bt *brokerConn) runSender(t *testing.T, b *broker, s string, m string, wg *sync.WaitGroup) {
	// Wait until the backend side has registered its request channel.
	<-bt.ready
	respChan, err := b.RelayRequest(s, &pb.HttpRequest{Id: proto.String(m), Url: proto.String("http://example.com/foo")})
	if err != nil {
		t.Errorf("Got relay request error: %v", err)
	}
	resp, more := <-respChan
	if want, got := m, string(resp.Body); want != got {
		t.Errorf("Wrong response; want %s; got %s\n", want, got)
	}
	// The channel must be closed after the final (EOF) response.
	resp, more = <-respChan
	if more {
		t.Errorf("Got more than 1 response: %v", resp)
	}
	wg.Done()
}
// bakeRequest registers the backend's request channel (if absent) and then
// signals waiting senders that it is safe to call RelayRequest.
func (bt *brokerConn) bakeRequest(b *broker, s string) {
	// ensure that the request is created before the user client connects
	b.m.Lock()
	if _, found := b.req[s]; !found {
		b.req[s] = make(chan *pb.HttpRequest)
	}
	b.m.Unlock()
	close(bt.ready)
}
// runReceiver plays the relay-client side: it fetches one request for backend
// s and immediately answers it with a single EOF response echoing the ID.
func (bt *brokerConn) runReceiver(t *testing.T, b *broker, s string, wg *sync.WaitGroup) {
	bt.bakeRequest(b, s)
	req, err := b.GetRequest(context.Background(), s, "/")
	if err != nil {
		t.Errorf("Error when getting request: %v", err)
	}
	err = b.SendResponse(&pb.HttpResponse{Id: req.Id, Body: []byte(*req.Id), Eof: proto.Bool(true)})
	if err != nil {
		t.Errorf("Error when sending response: %v", err)
	}
	wg.Done()
}
// runSenderStream expects two items in the response stream, and it doesn't care about the contents.
// It plays the user-client side of a two-chunk streaming response.
func (bt *brokerConn) runSenderStream(t *testing.T, b *broker, s string, m string, wg *sync.WaitGroup) {
	// Wait until the backend side has registered its request channel.
	<-bt.ready
	respChan, err := b.RelayRequest(s, &pb.HttpRequest{Id: proto.String(m), Url: proto.String("http://example.com/foo")})
	if err != nil {
		t.Errorf("Got relay request error: %v", err)
	}
	// First response (Eof=false)
	if _, more := <-respChan; !more {
		t.Errorf("Got zero responses, want two.")
	}
	// Second response (Eof=true)
	if _, more := <-respChan; !more {
		t.Errorf("Got one response, want two.")
	}
	// Check that channel is closed.
	if _, more := <-respChan; more {
		t.Errorf("Got more than two responses, want two.")
	}
	wg.Done()
}
// runReceiverStream sends two items in the response stream, waiting before the second.
// It returns after the first response has been sent.
func (bt *brokerConn) runReceiverStream(t *testing.T, b *broker, s string, wg *sync.WaitGroup, done <-chan bool) {
	bt.bakeRequest(b, s)
	req, err := b.GetRequest(context.Background(), s, "/")
	if err != nil {
		t.Errorf("Error when getting request: %v", err)
	}
	// First chunk: Eof=false keeps the stream open.
	err = b.SendResponse(&pb.HttpResponse{Id: req.Id, Body: []byte(*req.Id), Eof: proto.Bool(false)})
	if err != nil {
		t.Errorf("Error when sending response: %v", err)
	}
	go func() {
		// Hold the final (EOF) chunk until the caller signals via `done`.
		<-done
		err = b.SendResponse(&pb.HttpResponse{Id: req.Id, Body: []byte(*req.Id), Eof: proto.Bool(true)})
		if err != nil {
			t.Errorf("Error when sending response: %v", err)
		}
		wg.Done()
	}()
}
// TestNormalCase exercises three concurrent request/response pairs across two
// backends: "foo" handles two requests, "bar" handles one.
func TestNormalCase(t *testing.T) {
	b := newBroker()
	var bConns [3]brokerConn
	for i := range bConns {
		bConns[i] = newBrokerConn()
	}
	var wg sync.WaitGroup
	wg.Add(6)
	go bConns[0].runSender(t, b, "foo", idOne, &wg)
	go bConns[1].runSender(t, b, "foo", idTwo, &wg)
	go bConns[2].runSender(t, b, "bar", idThree, &wg)
	go bConns[0].runReceiver(t, b, "foo", &wg)
	go bConns[1].runReceiver(t, b, "foo", &wg)
	go bConns[2].runReceiver(t, b, "bar", &wg)
	wg.Wait()
}
// TestResponseStream checks that a two-chunk streamed response reaches the
// user-client side, with the second chunk gated on the `done` signal.
func TestResponseStream(t *testing.T) {
	b := newBroker()
	bc := newBrokerConn()
	var wg sync.WaitGroup
	done := make(chan bool)
	wg.Add(2)
	go bc.runSenderStream(t, b, "foo", idOne, &wg)
	bc.runReceiverStream(t, b, "foo", &wg, done)
	done <- true
	wg.Wait()
}
// TestMissingId checks that responding to a request ID the broker has never
// seen yields an error.
func TestMissingId(t *testing.T) {
	b := newBroker()
	err := b.SendResponse(&pb.HttpResponse{Id: proto.String(idOne)})
	if err == nil {
		t.Errorf("Invalid response did not produce an error")
	}
}
// TestDuplicateId checks that responding to an already-completed request ID
// fails: SendResponse with Eof=true removes the ID from the broker, so a
// second response for the same ID must be rejected.
func TestDuplicateId(t *testing.T) {
	b := newBroker()
	bc := newBrokerConn()
	var wg sync.WaitGroup
	wg.Add(2)
	go bc.runSender(t, b, "foo", idOne, &wg)
	go bc.runReceiver(t, b, "foo", &wg)
	wg.Wait()
	err := b.SendResponse(&pb.HttpResponse{Id: proto.String(idOne)})
	if err == nil {
		t.Errorf("Duplicate response did not produce an error")
	}
}
// TestRequestStream checks that data put into a pending request's stream
// (e.g. stdin for `kubectl exec`) is handed back by GetRequestStream.
func TestRequestStream(t *testing.T) {
	// Start a request that won't terminate until we send `done`.
	b := newBroker()
	bc := newBrokerConn()
	var wg sync.WaitGroup
	done := make(chan bool)
	wg.Add(2)
	go bc.runSenderStream(t, b, "foo", idOne, &wg)
	bc.runReceiverStream(t, b, "foo", &wg, done)
	// Send a message over the request stream and assert that it arrives.
	go func() {
		ok := b.PutRequestStream(idOne, []byte("hello"))
		if !ok {
			t.Error("PutRequestStream(idOne, \"hello\") = false, want true")
		}
	}()
	data, ok := b.GetRequestStream(idOne)
	if !ok {
		t.Error("data, ok := GetRequestStream(idOne); ok = false, want true")
	}
	if !bytes.Equal(data, []byte("hello")) {
		t.Errorf("data, ok := GetRequestStream(idOne); data = %q, want \"hello\"", data)
	}
	// Complete the ongoing request.
	done <- true
	wg.Wait()
}
// TestRequestStreamUnknownID checks that both stream operations report
// ok=false for a request ID the broker has never seen.
func TestRequestStreamUnknownID(t *testing.T) {
	b := newBroker()
	if ok := b.PutRequestStream(unknownID, []byte{}); ok {
		t.Error("ok := PutRequestStream(unknownID, \"\"); ok = true, want false")
	}
	// Fixed malformed failure message (missing closing parenthesis).
	if _, ok := b.GetRequestStream(unknownID); ok {
		t.Error("_, ok := GetRequestStream(unknownID); ok = true, want false")
	}
}
// TestTimeout checks that reaping a pending request closes its response
// stream, so the waiting sender observes a closed channel rather than a
// response.
func TestTimeout(t *testing.T) {
	b := newBroker()
	// create the request channel manually to avoid race condition between the 2
	// goroutines below
	b.req["foo"] = make(chan *pb.HttpRequest)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		respChan, err := b.RelayRequest("foo", &pb.HttpRequest{Id: proto.String(idOne), Url: proto.String("http://example.com/foo")})
		if err != nil {
			t.Errorf("Got relay request error: %v", err)
		}
		if _, more := <-respChan; more {
			t.Errorf("Got unexpected response")
		}
		wg.Done()
	}()
	go func() {
		slog.Info("Getting request")
		b.GetRequest(context.Background(), "foo", "/")
		slog.Info("Reaping inactive requests")
		// A threshold in the future makes every pending request "inactive".
		b.ReapInactiveRequests(time.Now().Add(10 * time.Second))
		slog.Info("Done")
		wg.Done()
	}()
	wg.Wait()
}
// TestReapWhileSendingResponse checks that ReapInactiveRequests can safely
// close a request whose SendResponse call is blocked because the user client
// never consumes the response.
func TestReapWhileSendingResponse(t *testing.T) {
	b := newBroker()
	b.req["foo"] = make(chan *pb.HttpRequest)
	// create a broker connection between the user client and backend side, but don't
	// start sending data. "req" is backend side and "resp" is user client side.
	var req *pb.HttpRequest
	var reqErr error
	var respErr error
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		req, reqErr = b.GetRequest(context.Background(), "foo", "/")
		if reqErr != nil {
			// Use a %v verb so the error value is actually rendered
			// (previously the argument had no formatting directive).
			t.Errorf("GetRequest error: %v", reqErr)
		}
		wg.Done()
	}()
	go func() {
		_, respErr = b.RelayRequest("foo", &pb.HttpRequest{Id: proto.String(idOne), Url: proto.String("http://example.com/foo")})
		if respErr != nil {
			t.Errorf("RelayRequest error: %v", respErr)
		}
		wg.Done()
	}()
	wg.Wait()
	if reqErr != nil || respErr != nil {
		t.Errorf("Error making broker connection")
	}
	// start sending response to user client, BUT do not start consuming the response.
	wg.Add(1)
	go func() {
		reqErr = b.SendResponse(&pb.HttpResponse{Id: req.Id, Body: []byte(*req.Id), Eof: proto.Bool(false)})
		if reqErr == nil || reqErr.Error() != "Closed due to inactivity" {
			t.Errorf("Wrong SendResponse error or no error: %v", reqErr)
		}
		wg.Done()
	}()
	// FIXME(koonpeng): we need to wait for the goroutine to be blocked on writing the response, currently
	// there is no way to confirm that so we use a sleep.
	time.Sleep(100 * time.Millisecond)
	// reap the request
	b.ReapInactiveRequests(time.Now().Add(10 * time.Second))
	wg.Wait()
}
// TestReapWhileSendingRequest checks that ReapInactiveRequests can safely
// close a request whose PutRequestStream call is blocked because the backend
// side never consumes the stream.
func TestReapWhileSendingRequest(t *testing.T) {
	b := newBroker()
	b.req["foo"] = make(chan *pb.HttpRequest)
	// create a broker connection between the user client and backend side, but don't
	// start sending data. "req" is backend side and "resp" is user client side.
	var req *pb.HttpRequest
	var reqErr error
	var respErr error
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		req, reqErr = b.GetRequest(context.Background(), "foo", "/")
		if reqErr != nil {
			// Use a %v verb so the error value is actually rendered
			// (previously the argument had no formatting directive).
			t.Errorf("GetRequest error: %v", reqErr)
		}
		wg.Done()
	}()
	go func() {
		_, respErr = b.RelayRequest("foo", &pb.HttpRequest{Id: proto.String(idOne), Url: proto.String("http://example.com/foo")})
		if respErr != nil {
			t.Errorf("RelayRequest error: %v", respErr)
		}
		wg.Done()
	}()
	wg.Wait()
	if reqErr != nil || respErr != nil {
		t.Errorf("Error making broker connection")
	}
	// start sending response to backend, BUT do not start consuming on the backend side
	wg.Add(1)
	go func() {
		if ok := b.PutRequestStream(*req.Id, []byte(*req.Id)); !ok {
			t.Errorf("Error putting request stream")
		}
		wg.Done()
	}()
	// FIXME(koonpeng): we need to wait for the goroutine to be blocked on writing the request, currently
	// there is no way to confirm that so we use a sleep.
	time.Sleep(100 * time.Millisecond)
	// reap the request
	b.ReapInactiveRequests(time.Now().Add(10 * time.Second))
	wg.Wait()
}
================================================
FILE: src/go/cmd/http-relay-server/server/server.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"encoding/hex"
"fmt"
"io"
"log/slog"
"math/rand"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
pb "github.com/googlecloudrobotics/core/src/proto/http-relay"
"github.com/googlecloudrobotics/ilog"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/protobuf/proto"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/trace"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"golang.org/x/sync/errgroup"
)
const (
	// clientPrefix marks user-client URLs; the path segment after it names
	// the target backend.
	clientPrefix = "/client/"
	// Time to wait for requests to complete before calling panic(). This should
	// be less than the kubelet's timeout (30s by default) so that we can print
	// a stack trace and debug what is still running.
	cleanShutdownTimeout = 20 * time.Second
	// Print more detailed logs when enabled.
	debugLogs = false
	// DefaultPort is the default port number to listen on.
	DefaultPort = 80
	// DefaultBlockSize is the default size of i/o buffer in bytes.
	DefaultBlockSize = 10 * 1024
	// DefaultInactiveRequestTimeout is the default timeout for inactive requests. In particular, this sets a limit on how long the backend can wait before writing headers and the response status.
	DefaultInactiveRequestTimeout = 60 * time.Second
)
// Config holds the tunable parameters of the relay server. Zero values are
// replaced with defaults by NewServer.
type Config struct {
	// Port number to listen on.
	Port int
	// BlockSize is the size of i/o buffer in bytes.
	BlockSize int
	// InactiveRequestTimeout is the timeout for inactive requests.
	InactiveRequestTimeout time.Duration
}
// Server relays HTTP requests between user clients and relay clients
// (backends) via an in-memory broker.
type Server struct {
	// conf is the effective configuration after defaulting in NewServer.
	conf Config
	// b matches user-client requests with relay-client polls.
	b *broker
}
func NewServer(conf Config) *Server {
if conf.Port == -1 { // let tests pass a value of 0 to let the os pick a port
conf.Port = DefaultPort
}
if conf.BlockSize == 0 {
conf.BlockSize = DefaultBlockSize
}
if conf.InactiveRequestTimeout == 0 {
conf.InactiveRequestTimeout = DefaultInactiveRequestTimeout
}
s := &Server{
conf: conf,
b: newBroker(),
}
go func() {
for t := range time.Tick(10 * time.Second) {
s.b.ReapInactiveRequests(t.Add(-1 * conf.InactiveRequestTimeout))
}
}()
return s
}
func createId() string {
u := make([]byte, 16)
// err is documented as always nil
rand.Read(u)
return hex.EncodeToString(u)
}
// marshalHeader flattens a multi-valued HTTP header map into the repeated
// name/value pairs of the relay protocol.
func marshalHeader(h *http.Header) []*pb.HttpHeader {
	result := make([]*pb.HttpHeader, 0)
	for name, values := range *h {
		for _, value := range values {
			result = append(result, &pb.HttpHeader{Name: proto.String(name), Value: proto.String(value)})
		}
	}
	return result
}
// unmarshalHeader copies each relayed header pair onto the outgoing HTTP
// response.
func unmarshalHeader(w http.ResponseWriter, protoHeader []*pb.HttpHeader) {
	out := w.Header()
	for _, h := range protoHeader {
		out.Add(*h.Name, *h.Value)
	}
}
// addServiceName tags a span with this service's name so traces can be
// filtered by originating component.
func addServiceName(span *trace.Span) {
	span.AddAttributes(trace.StringAttribute("service.name", "http-relay-server"))
}
// extractBackendNameAndPath determines which backend a request is addressed
// to and the path to forward. URLs under /client/ encode the backend in the
// first path segment; all other requests (gRPC) name it via "X-Server-Name".
func extractBackendNameAndPath(r *http.Request) (backendName string, path string, err error) {
	if !strings.HasPrefix(r.URL.Path, clientPrefix) {
		// Requests without the /client/ prefix are gRPC requests. The backend is
		// identified by "X-Server-Name" header.
		headers, ok := r.Header["X-Server-Name"]
		if !ok {
			return "", "", fmt.Errorf("Request without required header: \"X-Server-Name\"")
		}
		return headers[0], r.URL.Path, nil
	}
	// After stripping, the path is "${SERVER_NAME}/${REQUEST}".
	parts := strings.SplitN(strings.TrimPrefix(r.URL.Path, clientPrefix), "/", 2)
	if parts[0] == "" {
		return "", "", fmt.Errorf("Request path too short: missing remote server identifier")
	}
	backendName = parts[0]
	path = "/"
	if len(parts) > 1 {
		path += parts[1]
	}
	return backendName, path, nil
}
// responseChunk is one piece of a relayed response body, streamed to the
// user client as it arrives from the backend.
type responseChunk struct {
	// Body is the next slice of response bytes.
	Body []byte
	// Trailers carries the message's Trailer headers; per the forwarding
	// loop in userClientRequest, only the last chunk actually has them.
	Trailers []*pb.HttpHeader
}
// responseFilter enforces that there's at least one HttpResponse in the 'in'
// channel and that the first response has a status code. It collects the
// responses and then returns headers and status-code. Additionally, it
// returns body and trailers asynchronously via the returned channel.
func (s *Server) responseFilter(backendCtx backendContext, in <-chan *pb.HttpResponse) ([]*pb.HttpHeader, int, <-chan *responseChunk) {
	// Buffered with capacity 1 so the error paths below can enqueue a single
	// chunk before any reader exists.
	responseChunks := make(chan *responseChunk, 1)
	// Block until the backend produces its first message, or the broker
	// closes the channel (e.g. after the inactivity timeout).
	firstMessage, more := <-in
	if !more {
		// Channel closed without any message: report a gateway timeout.
		brokerResponses.WithLabelValues("client", "missing_message", backendCtx.ServerName).Inc()
		responseChunks <- &responseChunk{
			Body: []byte(fmt.Sprintf("Timeout after %v, indicating that the backend request took too long", s.conf.InactiveRequestTimeout)),
		}
		close(responseChunks)
		return nil, http.StatusGatewayTimeout, responseChunks
	}
	if firstMessage.StatusCode == nil {
		// Protocol violation: the first message must carry the status code.
		brokerResponses.WithLabelValues("client", "missing_header", backendCtx.ServerName).Inc()
		responseChunks <- &responseChunk{
			Body: []byte("Received no header from relay client"),
		}
		close(responseChunks)
		// Flush remaining messages so the sender is not blocked forever.
		for range in {
		}
		return nil, http.StatusInternalServerError, responseChunks
	}
	// Forward the first message's body/trailers, then relay the rest of the
	// stream asynchronously.
	responseChunks <- &responseChunk{
		Body:     []byte(firstMessage.Body),
		Trailers: []*pb.HttpHeader(firstMessage.Trailer),
	}
	go func() {
		for backendResp := range in {
			brokerResponses.WithLabelValues("client", "ok", backendCtx.ServerName).Inc()
			responseChunks <- &responseChunk{
				Body:     []byte(backendResp.Body),
				Trailers: []*pb.HttpHeader(backendResp.Trailer),
			}
		}
		close(responseChunks)
	}()
	return firstMessage.Header, int(*firstMessage.StatusCode), responseChunks
}
// backendContext identifies a single relayed request.
type backendContext struct {
	// Id is "<server name>:<random hex>", unique per request.
	Id string
	// ServerName is the relay client (backend) the request is addressed to.
	ServerName string
	// Path is the request path with the /client/<name> prefix stripped.
	Path string
}
// newBackendContext derives the backend routing information for r and
// assigns the request a unique id. Returns an error for malformed requests
// that name no backend.
func newBackendContext(r *http.Request) (*backendContext, error) {
	name, path, err := extractBackendNameAndPath(r)
	if err != nil {
		return nil, err
	}
	ctx := &backendContext{
		Id:         name + ":" + createId(),
		ServerName: name,
		Path:       path,
	}
	return ctx, nil
}
// health serves /healthz by asking the broker whether it is healthy. A
// watchdog logs a warning if the check takes suspiciously long (e.g. a
// stuck broker lock).
func (s *Server) health(w http.ResponseWriter, r *http.Request) {
	watchdog := time.AfterFunc(8*time.Second, func() {
		slog.Warn("While checking server health, unable to acquire lock after 8 seconds")
	})
	defer watchdog.Stop()
	err := s.b.Healthy()
	if err == nil {
		w.Header().Set("Content-Type", "text/plain")
		w.Write([]byte("ok"))
		return
	}
	slog.Error("Health check failed", ilog.Err(err))
	http.Error(w, err.Error(), http.StatusInternalServerError)
}
// bidirectionalStream handles a 101 Switching Protocols response from the
// backend, by "hijacking" to get a bidirectional connection to the client,
// and streaming data between client and broker/relay client.
func (s *Server) bidirectionalStream(backendCtx backendContext, w http.ResponseWriter, responseChunks <-chan *responseChunk) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		// e.g. HTTP/2 response writers do not implement Hijacker.
		http.Error(w, "Backend returned 101 Switching Protocols, which is not supported by the relay server", http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusSwitchingProtocols)
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		// After a failed hijack, the connection is in an unknown state and
		// we can't report an error to the client.
		slog.Error("Failed to hijack connection after 101", slog.String("ID", backendCtx.Id), ilog.Err(err))
		return
	}
	slog.Info("Switched protocols", slog.String("ID", backendCtx.Id))
	defer conn.Close()
	go func() {
		// This goroutine handles the request stream from client to backend.
		slog.Info("Trying to read from bidi-stream", slog.String("ID", backendCtx.Id))
		for {
			// This must be a new buffer each time, as the channel is not making a copy
			bytes := make([]byte, s.conf.BlockSize)
			// Here we get the client stream (e.g. kubectl or k9s)
			n, err := bufrw.Read(bytes)
			if err != nil {
				// TODO(https://github.com/golang/go/issues/4373): in Go 1.13,
				// we may be able to suppress the "read from closed connection" better.
				if strings.Contains(err.Error(), "use of closed network connection") {
					// Request ended and connection closed by HTTP server.
					slog.Info("End of bidi-stream stream (closed socket)", slog.String("ID", backendCtx.Id))
				} else {
					// Connection has unexpectedly failed for some other reason.
					slog.Error("Error reading from bidi-stream", slog.String("ID", backendCtx.Id), ilog.Err(err))
				}
				return
			}
			slog.Info("Read from bidi-stream", slog.String("ID", backendCtx.Id), slog.Int("Bytes", n))
			// Hand the bytes to the broker; a false return means the request
			// has been closed, so stop reading.
			if ok = s.b.PutRequestStream(backendCtx.Id, bytes[:n]); !ok {
				slog.Info("End of bidi-stream stream", slog.String("ID", backendCtx.Id))
				return
			}
			slog.Info("Uploaded from bidi-stream", slog.String("ID", backendCtx.Id), slog.Int("Bytes", n))
		}
	}()
	// Meanwhile, copy response chunks from the backend to the hijacked
	// client connection, flushing after each chunk.
	numBytes := 0
	for responseChunk := range responseChunks {
		if _, err = bufrw.Write(responseChunk.Body); err != nil {
			slog.Error("Error writing response to bidi-stream", slog.String("ID", backendCtx.Id), ilog.Err(err))
			return
		}
		bufrw.Flush()
		numBytes += len(responseChunk.Body)
	}
	slog.Info("Wrote response chunk to bidi-stream", slog.String("ID", backendCtx.Id), slog.Int("Bytes", numBytes))
}
// readRequestBody drains the request body, recording how long the read takes
// in a trace span.
func (s *Server) readRequestBody(ctx context.Context, r *http.Request) ([]byte, error) {
	_, span := trace.StartSpan(ctx, "Read request body")
	addServiceName(span)
	defer span.End()
	body, err := io.ReadAll(r.Body)
	return body, err
}
// createBackendRequest wraps the user request into the relay protocol's
// HttpRequest. The URL host is deliberately "invalid": the relay client
// substitutes its real backend address.
func (s *Server) createBackendRequest(backendCtx backendContext, r *http.Request, body []byte) *pb.HttpRequest {
	target := url.URL{
		Scheme:   "http",
		Host:     "invalid",
		Path:     backendCtx.Path,
		RawQuery: r.URL.RawQuery,
		Fragment: r.URL.Fragment,
	}
	return &pb.HttpRequest{
		Id:     proto.String(backendCtx.Id),
		Method: proto.String(r.Method),
		Host:   proto.String(r.Host),
		Url:    proto.String(target.String()),
		Header: marshalHeader(&r.Header),
		Body:   body,
	}
}
// relayRequest hands the wrapped request to the broker for pickup by the
// relay client and returns the channel on which responses will arrive.
func (s *Server) relayRequest(ctx context.Context, backendCtx backendContext, request *pb.HttpRequest) (<-chan *pb.HttpResponse, error) {
	_, span := trace.StartSpan(ctx, "Schedule request for pickup")
	addServiceName(span)
	defer span.End()
	respChan, err := s.b.RelayRequest(backendCtx.ServerName, request)
	if err != nil {
		return nil, err
	}
	return respChan, nil
}
// waitForFirstResponseAndHandleSwitching blocks until the backend's first
// response (or a timeout), writes headers and status to w, and handles a
// 101 Switching Protocols response by hijacking the connection. The boolean
// return is true when the request was fully handled here (the 101 case).
func (s *Server) waitForFirstResponseAndHandleSwitching(ctx context.Context, backendCtx backendContext, w http.ResponseWriter, backendRespChan <-chan *pb.HttpResponse) ([]*pb.HttpHeader, <-chan *responseChunk, bool) {
	_, span := trace.StartSpan(ctx, "Waiting for first response")
	addServiceName(span)
	defer span.End()
	// responseFilter blocks until the first message arrives; header is nil
	// on the timeout path.
	header, status, responseChunksChan := s.responseFilter(backendCtx, backendRespChan)
	if header != nil {
		unmarshalHeader(w, header)
	}
	if status == http.StatusSwitchingProtocols {
		span.AddAttributes(trace.StringAttribute("notes", "Received 101 switching protocols."))
		// Note: call s.bidirectionalStream before w.WriteHeader so that
		// bidirectionalStream can set the status on error.
		// TODO(haukeheibel): I don't get this comment. We never write the
		// header and just return.
		s.bidirectionalStream(backendCtx, w, responseChunksChan)
		return nil, nil, true
	}
	w.WriteHeader(status)
	return header, responseChunksChan, false
}
// userClientRequest handles requests by the user-client (e.g. a browser):
// it wraps the request, schedules it for pickup by the relay client, and
// streams the backend's response chunks (and gRPC trailers) back.
func (s *Server) userClientRequest(w http.ResponseWriter, r *http.Request) {
	// Join an incoming trace if the client sent trace context, otherwise
	// start a new root span.
	f := &tracecontext.HTTPFormat{}
	var span *trace.Span
	ctx := r.Context()
	if sctx, ok := f.SpanContextFromRequest(r); ok {
		ctx, span = trace.StartSpanWithRemoteParent(ctx, "Received user client request "+r.URL.Path, sctx)
	} else {
		ctx, span = trace.StartSpan(ctx, "Received user client request "+r.URL.Path)
	}
	addServiceName(span)
	// Embedding the span in the request ensures that the server side spans are correctly
	// nested.
	// Note: We are overwriting the previous one with the current one which is not a
	// problem since we have already read the previous one.
	f.SpanContextToRequest(span.SpanContext(), r)
	// We can actually defer span.End() since this function will wait until a response from
	// a server is being received.
	defer span.End()
	if debugLogs {
		dump, _ := httputil.DumpRequest(r, false)
		slog.Info("Received user client request", slog.String("HttpRequest", string(dump)))
	}
	backendCtx, err := newBackendContext(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	body, err := s.readRequestBody(ctx, r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	backendReq := s.createBackendRequest(*backendCtx, r, body)
	// Pipe a request into the request channel to it get polled by the relay client.
	// Then return the response channel, so we can pass it on and wait on a response
	// from the relay-client.
	backendRespChan, err := s.relayRequest(ctx, *backendCtx, backendReq)
	if err != nil {
		if _, ok := err.(*RelayClientUnavailableError); ok {
			// Fixed: headers must be set BEFORE WriteHeader; net/http silently
			// ignores anything added to the header map afterwards, so the
			// relay marker header never reached real clients.
			w.Header().Set("X-CLOUDROBOTICS-HTTP-RELAY", backendCtx.Id)
			w.WriteHeader(http.StatusServiceUnavailable)
			w.Write([]byte(err.Error()))
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer s.b.StopRelayRequest(backendCtx.Id)
	header, responseChunksChan, done := s.waitForFirstResponseAndHandleSwitching(ctx, *backendCtx, w, backendRespChan)
	if done {
		// A 101 Switching Protocols response was fully handled via hijacking.
		return
	}
	_, forwardingResponseSpan := trace.StartSpan(ctx, "Forwarding backend response to user-client")
	addServiceName(forwardingResponseSpan)
	defer forwardingResponseSpan.End()
	// Stream body chunks to the user client as the backend produces them,
	// flushing after each chunk so streaming responses are not buffered.
	numBytes := 0
	for responseChunk := range responseChunksChan {
		if _, err = w.Write(responseChunk.Body); err != nil {
			slog.Error("Error writing response to user-client", slog.String("ID", backendCtx.Id), ilog.Err(err))
			return
		}
		if flush, ok := w.(http.Flusher); ok {
			flush.Flush()
		}
		numBytes += len(responseChunk.Body)
		// Only the last chunk will actually contain trailers.
		for _, h := range responseChunk.Trailers {
			w.Header().Add(http.TrailerPrefix+*h.Name, *h.Value)
			slog.Info("Adding real trailer", slog.String("ID", backendCtx.Id), slog.String("Name", *h.Name), slog.String("Value", *h.Value))
		}
	}
	// TODO(ensonic): open questions:
	// - can we do this less hacky? (see unmarshalHeader() above)
	// - why do we not always get them as trailers?
	for _, h := range header {
		if strings.HasPrefix(*h.Name, "Grpc-") {
			w.Header().Add(http.TrailerPrefix+*h.Name, *h.Value)
			slog.Info("Adding trailer from header", slog.String("ID", backendCtx.Id), slog.String("Name", *h.Name), slog.String("Value", *h.Value))
		}
	}
	slog.Info("Wrote response chunk to request", slog.String("ID", backendCtx.Id), slog.Int("Bytes", numBytes))
}
// serverRequest is polled by the relay client to pick up the next pending
// user request for its backend (named by the "server" query parameter).
func (s *Server) serverRequest(w http.ResponseWriter, r *http.Request) {
	backendName := r.URL.Query().Get("server")
	if backendName == "" {
		http.Error(w, "Missing server query parameter", http.StatusBadRequest)
		return
	}
	slog.Info("Relay client connected", slog.String("ServerName", backendName))
	// Block until a pending user request arrives, or the poll times out.
	request, err := s.b.GetRequest(r.Context(), backendName, r.URL.Path)
	if err != nil {
		// Expected if the relay is idle, or if the server is restarting.
		slog.Debug("Relay client got no request", slog.String("ID", backendName), ilog.Err(err))
		http.Error(w, err.Error(), http.StatusRequestTimeout)
		return
	}
	payload, err := proto.Marshal(request)
	if err != nil {
		slog.Error("Failed to marshal request", slog.String("ID", *request.Id), ilog.Err(err))
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/vnd.google.protobuf;proto=cloudrobotics.http_relay.v1alpha1.HttpRequest")
	w.Write(payload)
	slog.Info("Relay client accepted request", slog.String("ID", *request.Id))
}
// serverRequestStream lets the relay client pull additional request data
// (e.g. bytes of a hijacked bidirectional stream) for an in-flight request,
// identified by the "id" query parameter.
func (s *Server) serverRequestStream(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	if id == "" {
		http.Error(w, "Missing id query parameter", http.StatusBadRequest)
		return
	}
	data, ok := s.b.GetRequestStream(id)
	if !ok {
		// Using the 410 Gone error tells the relay client that this request
		// has completed.
		http.Error(w, "No ongoing request with id "+id, http.StatusGone)
		return
	}
	// Fixed: "application/octet-data" is not a registered MIME type; the
	// standard type for arbitrary binary payloads is application/octet-stream.
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Write(data)
	slog.Info("Relay client pulled streamed request chunk", slog.String("ID", id), slog.Int("Bytes", len(data)))
}
// serverResponse receives the response from the relay-client after it
// processed the initial request in the backend, and forwards it via the
// broker to the waiting user client.
func (s *Server) serverResponse(w http.ResponseWriter, r *http.Request) {
	payload, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	resp := &pb.HttpResponse{}
	if err := proto.Unmarshal(payload, resp); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Send the response to the actual user-client using our broker.
	// SendResponse fails if and only if the request ID is bad.
	if err := s.b.SendResponse(resp); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Acknowledge successful propagation of the backend response.
	w.Header().Set("Content-Type", "text/plain")
	w.Write([]byte("ok"))
	slog.Info("Relay client sent response", slog.String("ID", *resp.Id))
}
// Start wires up the HTTP handlers (user-client, relay-client, health, and
// metrics endpoints), serves with h2c support, and blocks until SIGTERM
// triggers a graceful shutdown. It panics on abnormal termination so that
// stack traces are available for debugging.
func (s *Server) Start() {
	h := http.NewServeMux()
	h.HandleFunc("/healthz", s.health)
	h.HandleFunc("/", s.userClientRequest)
	h.HandleFunc("/server/request", s.serverRequest)
	h.HandleFunc("/server/requeststream", s.serverRequestStream)
	h.HandleFunc("/server/response", s.serverResponse)
	h.Handle("/metrics", promhttp.Handler())
	// This context will be terminated when we get SIGTERM from Kubernetes. We need
	// some boilerplate to make this terminate the HTTP server and the ongoing
	// request contexts. This is based on:
	// https://www.rudderstack.com/blog/implementing-graceful-shutdown-in-go/#:~:text=Canceling%20long%20running%20requests
	mainCtx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	// Wrap the mux for h2c (HTTP/2 without TLS) and OpenCensus tracing.
	h2s := &http2.Server{}
	h2h := h2c.NewHandler(h, h2s)
	och := &ochttp.Handler{
		Handler: h2h,
	}
	h1s := &http.Server{
		Addr:    fmt.Sprintf(":%d", s.conf.Port),
		Handler: och,
		// BaseContext ties every request context to mainCtx so in-flight
		// requests are canceled on SIGTERM.
		BaseContext: func(l net.Listener) context.Context {
			slog.Info("Relay server listening", slog.Int("Port", l.Addr().(*net.TCPAddr).Port))
			return mainCtx
		},
	}
	// Wait for the server to terminate, either because it failed to create a
	// listener, or because we got SIGTERM.
	g, gCtx := errgroup.WithContext(mainCtx)
	g.Go(func() error {
		if err := h1s.ListenAndServe(); err != http.ErrServerClosed {
			return err
		}
		// ErrServerClosed follows SIGTERM which is normal when updating the
		// server.
		return nil
	})
	g.Go(func() error {
		// On shutdown, give in-flight requests cleanShutdownTimeout to finish.
		<-gCtx.Done()
		ctx, cancel := context.WithTimeout(context.Background(), cleanShutdownTimeout)
		defer cancel()
		return h1s.Shutdown(ctx)
	})
	if err := g.Wait(); err != nil {
		// SIGTERM indicates either a normal shutdown (eg pod update, node pool
		// update) or a failed liveness check (eg broker deadlock), we can't
		// easily tell. We panic to help debugging: if the environment sets
		// GOTRACEBACK=all they will see stacktraces after the panic.
		slog.Error("Server terminated abnormally", ilog.Err(err))
		panic("Server terminated abnormally")
	}
}
================================================
FILE: src/go/cmd/http-relay-server/server/server_test.go
================================================
// Copyright 2023 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
pb "github.com/googlecloudrobotics/core/src/proto/http-relay"
hijacktest "github.com/getlantern/httptest"
"google.golang.org/protobuf/proto"
)
// checkResponse asserts that resp has the expected status code and that its
// body reads back exactly as wantBody.
func checkResponse(t *testing.T, resp *http.Response, wantStatus int, wantBody string) {
	t.Helper()
	if got := resp.StatusCode; got != wantStatus {
		t.Errorf("Wrong response code; want %d; got %d", wantStatus, got)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Errorf("Failed to read body stream")
	}
	if got := string(body); got != wantBody {
		t.Errorf("Wrong body; want %s; got %s", wantBody, got)
	}
}
// TestClientHandler exercises the full user-client round trip: the request
// is wrapped for the relay protocol, picked up as backend "foo", and a
// single complete (Eof) response is relayed back with headers and trailers.
func TestClientHandler(t *testing.T) {
	req := httptest.NewRequest("GET", "/client/foo/bar?a=b#c", strings.NewReader("body"))
	req.Header.Add("X-Deadline", "now")
	respRecorder := httptest.NewRecorder()
	server := NewServer(Config{})
	wg := sync.WaitGroup{}
	wg.Add(1)
	// The handler blocks until the backend answers, so run it concurrently.
	go func() { server.userClientRequest(respRecorder, req); wg.Done() }()
	// Act as the relay client and pick up the pending request.
	relayRequest, err := server.b.GetRequest(context.Background(), "foo", "/")
	if err != nil {
		t.Errorf("Error when getting request: %v", err)
	}
	wantRequest := &pb.HttpRequest{
		Id:     relayRequest.Id,
		Method: proto.String("GET"),
		Host:   proto.String("example.com"),
		Url:    proto.String("http://invalid/bar?a=b#c"),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-Deadline"),
			Value: proto.String("now"),
		}},
		Body: []byte("body"),
	}
	// Remove the Traceparent header entry since we cannot assert on its value.
	// (Filters in place, reusing the slice's backing array.)
	tempHeader := relayRequest.Header[:0]
	for _, header := range relayRequest.Header {
		if *header.Name != "Traceparent" {
			tempHeader = append(tempHeader, header)
		}
	}
	relayRequest.Header = tempHeader
	if !proto.Equal(wantRequest, relayRequest) {
		t.Errorf("Wrong encapsulated request; want %s; got '%s'", wantRequest, relayRequest)
	}
	// Act as the relay client and send a single, complete (Eof) response.
	server.b.SendResponse(&pb.HttpResponse{
		Id:         relayRequest.Id,
		StatusCode: proto.Int32(201),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com"),
		}},
		Body: []byte("thebody"),
		Trailer: []*pb.HttpHeader{{
			Name:  proto.String("Some-Trailer"),
			Value: proto.String("trailer value"),
		}},
		Eof: proto.Bool(true),
	})
	wg.Wait()
	// Verify status, body, headers and trailers as seen by the user client.
	resp := respRecorder.Result()
	checkResponse(t, resp, 201, "thebody")
	if want, got := 1, len(resp.Header); want != got {
		t.Errorf("Wrong # of headers; want %d; got %d", want, got)
	}
	if want, got := "google.com", resp.Header.Get("X-GFE"); want != got {
		t.Errorf("Wrong header value; want %s; got %s", want, got)
	}
	if want, got := 1, len(resp.Trailer); want != got {
		t.Errorf("Wrong # of trailers; want %d; got %d", want, got)
	}
	if want, got := "trailer value", resp.Trailer.Get("Some-Trailer"); want != got {
		t.Errorf("Wrong trailer value; want %s; got %s", want, got)
	}
}
// TestClientHandlerWithChunkedResponse is like TestClientHandler, but the
// backend answers in two chunks ("the" + "body"); trailers arrive with the
// final (Eof) chunk only.
func TestClientHandlerWithChunkedResponse(t *testing.T) {
	req := httptest.NewRequest("GET", "/client/foo/bar?a=b#c", strings.NewReader("body"))
	req.Header.Add("X-Deadline", "now")
	respRecorder := httptest.NewRecorder()
	server := NewServer(Config{})
	wg := sync.WaitGroup{}
	wg.Add(1)
	// The handler blocks until the backend answers, so run it concurrently.
	go func() { server.userClientRequest(respRecorder, req); wg.Done() }()
	// Act as the relay client and pick up the pending request.
	relayRequest, err := server.b.GetRequest(context.Background(), "foo", "/")
	if err != nil {
		t.Errorf("Error when getting request: %v", err)
	}
	wantRequest := &pb.HttpRequest{
		Id:     relayRequest.Id,
		Method: proto.String("GET"),
		Host:   proto.String("example.com"),
		Url:    proto.String("http://invalid/bar?a=b#c"),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-Deadline"),
			Value: proto.String("now"),
		}},
		Body: []byte("body"),
	}
	// Remove the Traceparent header entry since we cannot assert on its value.
	tempHeader := relayRequest.Header[:0]
	for _, header := range relayRequest.Header {
		if *header.Name != "Traceparent" {
			tempHeader = append(tempHeader, header)
		}
	}
	relayRequest.Header = tempHeader
	if !proto.Equal(wantRequest, relayRequest) {
		t.Errorf("Wrong encapsulated request; want %s; got '%s'", wantRequest, relayRequest)
	}
	// First chunk: status code, headers, and partial body (no Eof).
	server.b.SendResponse(&pb.HttpResponse{
		Id:         relayRequest.Id,
		StatusCode: proto.Int32(201),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com"),
		}},
		Body: []byte("the"),
	})
	// Final chunk: rest of the body plus trailers, marked Eof.
	server.b.SendResponse(&pb.HttpResponse{
		Id:   relayRequest.Id,
		Body: []byte("body"),
		Trailer: []*pb.HttpHeader{{
			Name:  proto.String("Some-Trailer"),
			Value: proto.String("trailer value"),
		}},
		Eof: proto.Bool(true),
	})
	wg.Wait()
	// The user client should see the concatenated body.
	resp := respRecorder.Result()
	checkResponse(t, resp, 201, "thebody")
	if want, got := 1, len(resp.Header); want != got {
		t.Errorf("Wrong # of headers; want %d; got %d", want, got)
	}
	if want, got := "google.com", resp.Header.Get("X-GFE"); want != got {
		t.Errorf("Wrong header value; want %s; got %s", want, got)
	}
	if want, got := 1, len(resp.Trailer); want != got {
		t.Errorf("Wrong # of trailers; want %d; got %d", want, got)
	}
	if want, got := "trailer value", resp.Trailer.Get("Some-Trailer"); want != got {
		t.Errorf("Wrong trailer value; want %s; got %s", want, got)
	}
}
// TestClientBadRequest checks that malformed user-client requests (missing
// backend name in the path, or missing X-Server-Name header on gRPC-style
// requests) are rejected with 400 and a descriptive message.
func TestClientBadRequest(t *testing.T) {
	tests := []struct {
		desc     string
		req      *http.Request
		wantCode int
		wantMsg  string
	}{
		{
			desc:     "url-path misses the backend name and path",
			req:      httptest.NewRequest("GET", "/client/", strings.NewReader("body")),
			wantCode: 400,
			wantMsg:  "Request path too short:",
		},
		{
			desc:     "url-path misses the backend header",
			req:      httptest.NewRequest("GET", "/", strings.NewReader("body")),
			wantCode: 400,
			wantMsg:  "Request without required header:",
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			respRecorder := httptest.NewRecorder()
			server := NewServer(Config{})
			wg := sync.WaitGroup{}
			wg.Add(1)
			// Bad requests fail fast, so no relay-client interaction is needed.
			go func() { server.userClientRequest(respRecorder, tc.req); wg.Done() }()
			wg.Wait()
			resp := respRecorder.Result()
			if got := resp.StatusCode; tc.wantCode != got {
				t.Errorf("Wrong response code; want %d; got %d", tc.wantCode, got)
			}
			if tc.wantMsg != "" {
				body, err := io.ReadAll(resp.Body)
				if err != nil {
					t.Errorf("Failed to read body stream: %v", err)
				}
				if !strings.Contains(string(body), tc.wantMsg) {
					t.Errorf("Wrong response body; want %q; got %q", tc.wantMsg, body)
				}
			}
		})
	}
}
// nonRepeatingByteArray builds at least n bytes of non-repeating test data by
// concatenating consecutive integers, each right-padded to 8 characters. The
// result is rounded up to a whole number of 8-byte chunks.
func nonRepeatingByteArray(n int) []byte {
	out := make([]byte, 0, n)
	for counter := 0; len(out) < n; counter++ {
		out = append(out, []byte(fmt.Sprintf("%8d", counter))...)
	}
	return out
}
// TestRequestStreamHandler simulates a 101 Switching Protocols exchange: the
// hijacked client connection supplies post-request data, which the test
// pulls back out via /server/requeststream and compares to the original.
func TestRequestStreamHandler(t *testing.T) {
	blockSize := 64
	wantRequestStream := nonRepeatingByteArray(3 * blockSize)
	// In a background goroutine, run a client request with post-request data
	// in the request stream.
	req := httptest.NewRequest("GET", "/client/foo/bar?a=b#c", strings.NewReader("body"))
	req.Header.Add("X-Deadline", "now")
	respRecorder := hijacktest.NewRecorder(wantRequestStream)
	server := NewServer(Config{BlockSize: blockSize})
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() { server.userClientRequest(respRecorder, req); wg.Done() }()
	// Simulate a 101 Switching Protocols response from the backend.
	relayRequest, err := server.b.GetRequest(context.Background(), "foo", "/")
	if err != nil {
		t.Errorf("Error when getting request: %v", err)
	}
	server.b.SendResponse(&pb.HttpResponse{
		Id:         relayRequest.Id,
		StatusCode: proto.Int32(101),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("Upgrade"),
			Value: proto.String("SPDY/3.1"),
		}},
		Body: []byte("the"),
	})
	// Get the data from the request stream and check its contents.
	gotRequestStream := []byte{}
poll:
	for len(gotRequestStream) < len(wantRequestStream) {
		reqstreamRecorder := httptest.NewRecorder()
		streamreq := httptest.NewRequest("POST", "/server/requeststream?id="+*relayRequest.Id, nil)
		server.serverRequestStream(reqstreamRecorder, streamreq)
		switch sc := reqstreamRecorder.Result().StatusCode; sc {
		case http.StatusOK:
			gotRequestStream = append(gotRequestStream, reqstreamRecorder.Body.Bytes()...)
		case http.StatusGone:
			// 410 Gone means the request has completed, so stop polling.
			// Fixed: a plain "break" only exited the switch, so a premature
			// 410 would have spun in this loop forever.
			break poll
		default:
			t.Errorf("POST /server/requeststream returned unexpected status %d, want %d or %d", sc, http.StatusOK, http.StatusGone)
		}
	}
	if !bytes.Equal(wantRequestStream, gotRequestStream) {
		t.Errorf("POST /server/requeststream returned unexpected data, got:\n%s\nwant:\n%s", gotRequestStream, wantRequestStream)
	}
	// Terminate the client request and verify the response.
	server.b.SendResponse(&pb.HttpResponse{
		Id:   relayRequest.Id,
		Body: []byte("body"),
		Eof:  proto.Bool(true),
	})
	wg.Wait()
	checkResponse(t, respRecorder.Result(), 101, "thebody")
}
// TestServerRequestResponseHandler drives the relay-client side of the
// broker: a request is relayed for backend "b", served via /server/request,
// and the backend's reply is posted to /server/response and received intact.
func TestServerRequestResponseHandler(t *testing.T) {
	backendReq := &pb.HttpRequest{
		Id:     proto.String("15"),
		Method: proto.String("GET"),
		Url:    proto.String("http://invalid/my/url"),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com"),
		}},
		Body: []byte("thebody"),
	}
	backendResp := &pb.HttpResponse{
		Id:         backendReq.Id,
		StatusCode: proto.Int32(201),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com"),
		}},
		Body: []byte("thebody"),
		Eof:  proto.Bool(true),
	}
	backendRespBody, err := proto.Marshal(backendResp)
	if err != nil {
		t.Errorf("Failed to marshal test response: %v", err)
	}
	req := httptest.NewRequest("GET", "/server/request?server=b", strings.NewReader(""))
	resp := httptest.NewRequest("POST", "/server/response", bytes.NewReader(backendRespBody))
	reqRecorder := httptest.NewRecorder()
	respRecorder := httptest.NewRecorder()
	server := NewServer(Config{})
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Play the relay client: pull the pending request, then post the reply.
	go func() {
		server.serverRequest(reqRecorder, req)
		server.serverResponse(respRecorder, resp)
		wg.Done()
	}()
	// create the request channel to avoid 503 error for unknown clients.
	server.b.req["b"] = make(chan *pb.HttpRequest)
	serverRespChan, err := server.b.RelayRequest("b", backendReq)
	if err != nil {
		t.Errorf("Got relay request error: %v", err)
	}
	serverResp := <-serverRespChan
	wg.Wait()
	// The pulled request must be serialized in /server/request's body.
	if want, got := 200, reqRecorder.Result().StatusCode; want != got {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	body, err := io.ReadAll(reqRecorder.Result().Body)
	if err != nil {
		t.Errorf("Failed to read body stream: %v", err)
	}
	if !strings.Contains(string(body), "/my/url") {
		t.Errorf("Serialize request didn't contain URL: %s", string(body))
	}
	if !strings.Contains(string(body), "X-GFE") {
		t.Errorf("Serialize request didn't contain header: %s", string(body))
	}
	if want, got := 200, respRecorder.Result().StatusCode; want != got {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	// The broker must hand back the posted response unchanged.
	if !proto.Equal(serverResp, backendResp) {
		t.Errorf("Encapsulated response was garbled; want %s; got %s", backendResp, serverResp)
	}
}
// TestServerResponseHandlerWithInvalidRequestID checks that posting a
// response for an unknown request ID yields 400 Bad Request.
func TestServerResponseHandlerWithInvalidRequestID(t *testing.T) {
	backendResp := &pb.HttpResponse{
		// No request with this ID exists in the broker.
		Id:         proto.String("not found"),
		StatusCode: proto.Int32(201),
		Header: []*pb.HttpHeader{{
			Name:  proto.String("X-GFE"),
			Value: proto.String("google.com"),
		}},
		Body: []byte("thebody"),
		Eof:  proto.Bool(true),
	}
	backendRespBody, err := proto.Marshal(backendResp)
	if err != nil {
		t.Errorf("Failed to marshal test response: %v", err)
	}
	resp := httptest.NewRequest("POST", "/server/response", bytes.NewReader(backendRespBody))
	respRecorder := httptest.NewRecorder()
	server := NewServer(Config{})
	server.serverResponse(respRecorder, resp)
	if want, got := http.StatusBadRequest, respRecorder.Result().StatusCode; want != got {
		t.Errorf("serverResponse() gave wrong status code; want %d; got %d", want, got)
	}
}
// Test that a user client request to a backend that has not been seen before
// immediately returns 503 Service Unavailable.
func TestRequestToUnknownBackendResponse503(t *testing.T) {
	rec := httptest.NewRecorder()
	srv := NewServer(Config{})
	srv.userClientRequest(rec, httptest.NewRequest("GET", "/client/test/path", bytes.NewReader([]byte{})))
	if got := rec.Code; got != http.StatusServiceUnavailable {
		t.Errorf("Expected status 503, got %d", got)
	}
	body, err := io.ReadAll(rec.Body)
	if err != nil {
		t.Fatal(err)
	}
	wantPrefix := []byte("Cannot reach the client \"test\"")
	if !bytes.HasPrefix(body, wantPrefix) {
		t.Errorf("Unexpected body prefix\nWant: %s\nGot: %s", wantPrefix, body)
	}
	if rec.Header().Get("X-CLOUDROBOTICS-HTTP-RELAY") == "" {
		t.Error("Missing X-CLOUDROBOTICS-HTTP-RELAY header")
	}
}
================================================
FILE: src/go/cmd/hw-exporter/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

# Library with the hw-exporter sources; private since only this package's
# binary and test embed it.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/hw-exporter",
    visibility = ["//visibility:private"],
    deps = [
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_jaypipes_ghw//:go_default_library",
        "@com_github_jaypipes_ghw//pkg/option:go_default_library",
        "@com_github_jaypipes_ghw//pkg/util:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library",
    ],
)

# The hw-exporter executable.
go_binary(
    name = "hw-exporter",
    embed = [":go_default_library"],
    visibility = ["//visibility:public"],
)

# Tarball layer containing the binary, consumed by the OCI image below.
pkg_tar(
    name = "hw-exporter-image-layer",
    srcs = [":hw-exporter"],
    extension = "tar.gz",
)

# Container image running the exporter on a distroless base.
oci_image(
    name = "hw-exporter-image",
    base = "@distroless_base",
    entrypoint = ["/hw-exporter"],
    tars = [":hw-exporter-image-layer"],
    visibility = ["//visibility:public"],
)

# Unit tests for the exporter.
go_test(
    name = "go_default_test",
    srcs = ["main_test.go"],
    embed = [":go_default_library"],
    deps = [
        "@com_github_jaypipes_ghw//:go_default_library",
        "@com_github_jaypipes_ghw//pkg/option:go_default_library",
        "@com_github_jaypipes_ghw//pkg/pci:go_default_library",
        "@com_github_jaypipes_ghw//pkg/util:go_default_library",
        "@com_github_jaypipes_pcidb//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/testutil:go_default_library",
    ],
)
================================================
FILE: src/go/cmd/hw-exporter/main.go
================================================
// hw-exporter exposes a Prometheus metric pci_device_count that indicates the
// number of each PCI device type (vendor/product/class/driver) installed on
// this node.
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/googlecloudrobotics/ilog"
"github.com/jaypipes/ghw"
"github.com/jaypipes/ghw/pkg/option"
"github.com/jaypipes/ghw/pkg/util"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
	// Command-line flags controlling the exporter.
	metricsPort = flag.Int("metrics-port", 9999, "Port to expose Prometheus metrics on.")
	logLevel    = flag.Int("log-level", int(slog.LevelInfo), "the log message level required to be logged")
	chroot      = flag.String("chroot", "/", "Path to chroot into before collecting hardware info.")
)

// pciCollector implements prometheus.Collector, exposing one gauge that
// counts PCI devices grouped by vendor/product/class/driver.
type pciCollector struct {
	// pciDeviceCount describes the pci_device_count metric.
	pciDeviceCount *prometheus.Desc
}
// newPciCollector builds a pciCollector with the descriptor for the
// pci_device_count metric and its four labels.
func newPciCollector() *pciCollector {
	desc := prometheus.NewDesc(
		"pci_device_count",
		"Number of PCI devices by vendor, product, class, and driver.",
		[]string{"vendor", "product", "class", "driver"},
		nil,
	)
	return &pciCollector{pciDeviceCount: desc}
}
// Describe implements the prometheus.Collector interface by sending the
// single metric descriptor this collector owns.
func (c *pciCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.pciDeviceCount
}
// getNameOrID returns the name of a PCI device component, or its
// hex-prefixed ID if the name is unknown.
func getNameOrID(name, id string) string {
	if name != util.UNKNOWN {
		return name
	}
	return "0x" + id
}
// Collect implements the prometheus.Collector interface, counting the number of
// devices by vendor/product/class/driver.
func (c *pciCollector) Collect(ch chan<- prometheus.Metric) {
	pciInfo, err := ghw.PCI(&option.Option{Chroot: chroot})
	if err != nil {
		slog.Error("Failed to get PCI info", ilog.Err(err))
		return
	}
	// Aggregate devices by their label tuple before emitting one gauge
	// sample per distinct tuple.
	counts := map[[4]string]int{}
	for _, dev := range pciInfo.Devices {
		key := [4]string{
			getNameOrID(dev.Vendor.Name, dev.Vendor.ID),
			getNameOrID(dev.Product.Name, dev.Product.ID),
			getNameOrID(dev.Class.Name, dev.Class.ID),
			dev.Driver,
		}
		counts[key]++
	}
	for key, n := range counts {
		ch <- prometheus.MustNewConstMetric(c.pciDeviceCount, prometheus.GaugeValue, float64(n), key[0], key[1], key[2], key[3])
	}
}
// main exposes the pci_device_count metric over HTTP until terminated by
// SIGTERM or SIGINT, then shuts the server down gracefully.
func main() {
	flag.Parse()
	logHandler := ilog.NewLogHandler(slog.Level(*logLevel), os.Stderr)
	slog.SetDefault(slog.New(logHandler))
	// Run once on startup to test container setup, this is useful during development.
	_, err := ghw.PCI(&option.Option{Chroot: chroot})
	if err != nil {
		slog.Error("Failed to get PCI info", ilog.Err(err))
		os.Exit(1)
	}
	// Construct and run the metrics server until stopped by k8s (or Ctrl+C).
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
	defer cancel()
	registry := prometheus.NewRegistry()
	registry.MustRegister(newPciCollector())
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	server := &http.Server{
		Addr:    fmt.Sprintf(":%d", *metricsPort),
		Handler: mux,
	}
	go func() {
		slog.Info("Starting metrics server", slog.Int("port", *metricsPort))
		if err := server.ListenAndServe(); err != http.ErrServerClosed {
			slog.Error("Metrics server failed", ilog.Err(err))
			os.Exit(1)
		}
	}()
	// Call Shutdown() in the main goroutine because ListenAndServe() returns
	// immediately but if the main goroutine ends then, the process will stop
	// before finishing any ongoing requests.
	<-ctx.Done()
	slog.Info("Shutting down metrics server...")
	// Previously the Shutdown error was silently discarded; log it so failed
	// graceful shutdowns are visible.
	if err := server.Shutdown(context.Background()); err != nil {
		slog.Error("Shutdown failed", ilog.Err(err))
	}
}
================================================
FILE: src/go/cmd/hw-exporter/main_test.go
================================================
package main
import (
"strings"
"testing"
"github.com/jaypipes/ghw"
"github.com/jaypipes/ghw/pkg/option"
"github.com/jaypipes/ghw/pkg/pci"
"github.com/jaypipes/ghw/pkg/util"
"github.com/jaypipes/pcidb"
"github.com/prometheus/client_golang/prometheus/testutil"
)
var (
	// Fixtures with real PCI names, used by the "known devices" case.
	testVendor = pcidb.Vendor{
		Name: "Intel Corporation",
		ID:   "8086",
	}
	// Fixtures whose Name is util.UNKNOWN, exercising the "0x<ID>" fallback
	// in getNameOrID.
	unknownVendor = pcidb.Vendor{
		Name: util.UNKNOWN,
		ID:   "1234",
	}
	testProduct = pcidb.Product{
		Name: "Ethernet Connection (17) I219-LM",
		ID:   "1a1c",
	}
	unknownProduct = pcidb.Product{
		Name: util.UNKNOWN,
		ID:   "5678",
	}
	testClass = pcidb.Class{
		Name: "Ethernet controller",
		ID:   "0200",
	}
	unknownClass = pcidb.Class{
		Name: util.UNKNOWN,
		ID:   "9abc",
	}
)
// TestPciCollector_Collect stubs out ghw.PCI and checks that Collect emits
// one pci_device_count sample per distinct vendor/product/class/driver tuple.
func TestPciCollector_Collect(t *testing.T) {
	// ghw.PCI is reassignable here, so swap in a fake and restore it when
	// the test finishes.
	oldPCI := ghw.PCI
	t.Cleanup(func() {
		ghw.PCI = oldPCI
	})
	tests := []struct {
		desc    string
		devices []*pci.Device
		want    string // expected output in Prometheus text exposition format
	}{{
		desc: "known devices",
		devices: []*pci.Device{{
			Vendor:  &testVendor,
			Product: &testProduct,
			Class:   &testClass,
			Driver:  "testdriver1",
		}, {
			Vendor:  &testVendor,
			Product: &testProduct,
			Class:   &testClass,
			Driver:  "testdriver2",
		}, {
			Vendor:  &testVendor,
			Product: &testProduct,
			Class:   &testClass,
			Driver:  "testdriver2",
		}},
		want: `
# HELP pci_device_count Number of PCI devices by vendor, product, class, and driver.
# TYPE pci_device_count gauge
pci_device_count{class="Ethernet controller",driver="testdriver1",product="Ethernet Connection (17) I219-LM",vendor="Intel Corporation"} 1
pci_device_count{class="Ethernet controller",driver="testdriver2",product="Ethernet Connection (17) I219-LM",vendor="Intel Corporation"} 2
`,
	}, {
		desc: "unknown device",
		devices: []*pci.Device{{
			Vendor:  &unknownVendor,
			Product: &unknownProduct,
			Class:   &unknownClass,
			Driver:  "testdriver",
		}},
		want: `
# HELP pci_device_count Number of PCI devices by vendor, product, class, and driver.
# TYPE pci_device_count gauge
pci_device_count{class="0x9abc",driver="testdriver",product="0x5678",vendor="0x1234"} 1
`,
	}}
	collector := newPciCollector()
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			// Fake PCI scan returning the per-case device list.
			ghw.PCI = func(opts ...*option.Option) (*pci.Info, error) {
				return &pci.Info{Devices: tc.devices}, nil
			}
			if err := testutil.CollectAndCompare(collector, strings.NewReader(tc.want), "pci_device_count"); err != nil {
				t.Errorf("unexpected collecting result:\n%s", err)
			}
		})
	}
}
================================================
FILE: src/go/cmd/metadata-server/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")

package(default_visibility = ["//visibility:public"])

# Metadata-server sources: CoreDNS patching, token/metadata HTTP handlers,
# and nftables NAT setup.
go_library(
    name = "go_default_library",
    srcs = [
        "coredns.go",
        "main.go",
        "metadata.go",
        "nftables.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/metadata-server",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/robotauth:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_fsnotify_fsnotify//:go_default_library",
        "@com_github_google_nftables//:go_default_library",
        "@com_github_google_nftables//expr:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@org_golang_google_api//cloudresourcemanager/v1:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
    ],
)

# Unit tests; size "small" keeps them in the fast test tier.
go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "coredns_test.go",
        "main_test.go",
        "metadata_test.go",
    ],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
    deps = [
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//kubernetes/fake:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
    ],
)

# The metadata-server executable.
go_binary(
    name = "metadata-server-app",
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)

# Tarball layer with the binary for the container image.
pkg_tar(
    name = "metadata-server-image-layer",
    srcs = [":metadata-server-app"],
    extension = "tar.gz",
)

# Container image; uses a base that ships iptables (the server installs a
# NAT redirect rule at startup).
oci_image(
    name = "metadata-server-image",
    base = "@iptables_base",
    entrypoint = ["/metadata-server-app"],
    tars = [":metadata-server-image-layer"],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/metadata-server/coredns.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"regexp"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
	// Location of the CoreDNS configuration in the cluster.
	configMapName      = "coredns"
	configMapNamespace = "kube-system"
	corefileName       = "Corefile"
	// zoneStart marks the beginning of the default server block in the Corefile.
	zoneStart = ".:53 {\n"
	// zoneStartPatched replaces zoneStart, inserting a hosts plugin entry
	// that resolves metadata.google.internal to the link-local metadata IP.
	zoneStartPatched = `.:53 {
hosts hosts metadata.google.internal {
169.254.169.254 metadata.google.internal
fallthrough
}
`
	// hostsStart is the opening of a pre-existing hosts block.
	hostsStart = "hosts {\n"
	// hostsStartPatched extends such a pre-existing hosts block with the
	// metadata.google.internal entry.
	hostsStartPatched = `hosts hosts metadata.google.internal host.minikube.internal {
169.254.169.254 metadata.google.internal
`
)

var (
	// hostsHostMinikubeInternalPattern matches the hosts block minikube
	// creates for host.minikube.internal; the capture group keeps its body.
	hostsHostMinikubeInternalPattern = regexp.MustCompile(hostsStart + `(\s*[.\d]+ host\.minikube\.internal\n\s*fallthrough\n\s*\})`)
	// hostsAnyPattern matches any hosts block, used to detect accidental
	// duplicates after patching.
	hostsAnyPattern = regexp.MustCompile(`hosts [^{]*{\n`)
)
// getCorefile fetches the CoreDNS ConfigMap and validates that it contains a
// Corefile with the expected ".:53 {" zone start.
func getCorefile(ctx context.Context, k8s kubernetes.Interface) (*v1.ConfigMap, error) {
	cm, err := k8s.CoreV1().ConfigMaps(configMapNamespace).Get(ctx, configMapName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get ConfigMap %s: %v", configMapName, err)
	}
	corefile, ok := cm.Data[corefileName]
	if !ok {
		return nil, fmt.Errorf("ConfigMap %s doesn't contain key %s", configMapName, corefileName)
	}
	if !strings.Contains(corefile, zoneStart) {
		return nil, fmt.Errorf("zone start %q not found in Corefile", zoneStart)
	}
	return cm, nil
}
// writeCorefile updates the CoreDNS ConfigMap in the cluster with the
// (possibly modified) contents of cm.
func writeCorefile(ctx context.Context, k8s kubernetes.Interface, cm *v1.ConfigMap) error {
	_, err := k8s.CoreV1().ConfigMaps(configMapNamespace).Update(ctx, cm, metav1.UpdateOptions{})
	return err
}
// PatchCorefile reads the CoreDNS config map and patches the Corefile to resolve
// metadata.google.internal to 169.254.169.254.
func PatchCorefile(ctx context.Context, k8s kubernetes.Interface) error {
	cm, err := getCorefile(ctx, k8s)
	if err != nil {
		return err
	}
	corefile := cm.Data[corefileName]
	// Idempotence: if either patch form is already present, do nothing.
	if strings.Contains(corefile, zoneStartPatched) || strings.Contains(corefile, hostsStartPatched) {
		return nil
	}
	if hostsHostMinikubeInternalPattern.MatchString(corefile) {
		// A hosts block for host.minikube.internal already exists: extend it.
		corefile = hostsHostMinikubeInternalPattern.ReplaceAllString(corefile, hostsStartPatched+"$1")
	} else {
		// Otherwise insert a fresh hosts block right after the zone start.
		corefile = strings.Replace(corefile, zoneStart, zoneStartPatched, 1)
	}
	if len(hostsAnyPattern.FindAllString(corefile, -1)) > 1 {
		return fmt.Errorf("multiple hosts entries after patching, please check the input")
	}
	cm.Data[corefileName] = corefile
	return writeCorefile(ctx, k8s, cm)
}
// RevertCorefile undoes the effect of PatchCorefile.
func RevertCorefile(ctx context.Context, k8s kubernetes.Interface) error {
	cm, err := getCorefile(ctx, k8s)
	if err != nil {
		return err
	}
	corefile := cm.Data[corefileName]
	hasZonePatch := strings.Contains(corefile, zoneStartPatched)
	hasHostsPatch := strings.Contains(corefile, hostsStartPatched)
	switch {
	case !hasZonePatch && !hasHostsPatch:
		// Nothing to revert; keeps the operation idempotent.
		return nil
	case hasZonePatch && hasHostsPatch:
		return fmt.Errorf("cannot contain both patches")
	case hasZonePatch:
		cm.Data[corefileName] = strings.Replace(corefile, zoneStartPatched, zoneStart, 1)
	default:
		cm.Data[corefileName] = strings.Replace(corefile, hostsStartPatched, hostsStart, 1)
	}
	return writeCorefile(ctx, k8s, cm)
}
================================================
FILE: src/go/cmd/metadata-server/coredns_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
const (
	// Corefile as shipped before minikube 1.21 (no hosts block).
	defaultCorefileBeforeMinikube121 = `.:53 {
whoami
}
`
	// Expected result of patching the pre-1.21 Corefile.
	modifiedCorefileBeforeMinikube121 = `.:53 {
hosts hosts metadata.google.internal {
169.254.169.254 metadata.google.internal
fallthrough
}
whoami
}
`
	// Corefile as shipped from minikube 1.21 on (includes a
	// host.minikube.internal hosts block).
	defaultCorefileAfterMinikube121 = `.:53 {
whoami
hosts {
127.0.0.1 host.minikube.internal
fallthrough
}
}
`
	// Expected result of patching the post-1.21 Corefile.
	modifiedCorefileAfterMinikube121 = `.:53 {
whoami
hosts hosts metadata.google.internal host.minikube.internal {
127.0.0.1 host.minikube.internal
fallthrough
}
}
`
	// A hosts block the patcher does not recognize; patching must fail.
	defaultCorefileUnexpected = `.:53 {
whoami
hosts {
127.0.0.1 host2.minikube.internal
fallthrough
}
}
`
	// Substitute host IP used to check patching tolerates other addresses.
	differentIp = "1.2.3.456"
)
// createCorefile seeds the (fake) cluster with a coredns ConfigMap whose
// Corefile key contains corefileData. It aborts the test on failure.
func createCorefile(t *testing.T, k8s kubernetes.Interface, corefileData string) {
	// Mark as a helper so failures are reported at the caller's line.
	t.Helper()
	if _, err := k8s.CoreV1().ConfigMaps(configMapNamespace).Create(
		context.Background(),
		&v1.ConfigMap{
			Data: map[string]string{
				corefileName: corefileData,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: configMapName,
			},
		},
		metav1.CreateOptions{}); err != nil {
		// Setup failure: abort instead of continuing without the ConfigMap,
		// which would produce confusing follow-on failures.
		t.Fatalf("error creating ConfigMap %s: %v", configMapName, err)
	}
}
// readCorefile returns the Corefile contents from the coredns ConfigMap in
// the (fake) cluster. It aborts the test on failure.
func readCorefile(t *testing.T, k8s kubernetes.Interface) string {
	// Mark as a helper so failures are reported at the caller's line.
	t.Helper()
	cm, err := k8s.CoreV1().ConfigMaps(configMapNamespace).Get(context.Background(), configMapName, metav1.GetOptions{})
	if err != nil {
		// Abort: a comparison against an empty string would only add noise.
		t.Fatalf("error reading ConfigMap coredns: %v", err)
	}
	data, ok := cm.Data[corefileName]
	if !ok {
		t.Fatalf("ConfigMap %s doesn't contain key %s", configMapName, corefileName)
	}
	return data
}
// TestPatchCorefile runs patch/revert round trips over representative
// Corefiles from before and after minikube 1.21, checking idempotence of
// both operations.
func TestPatchCorefile(t *testing.T) {
	tests := []struct {
		desc  string
		input string
		want  string
	}{
		{
			"default corefile without host.minikube.internal entry",
			defaultCorefileBeforeMinikube121,
			modifiedCorefileBeforeMinikube121,
		},
		{
			"default corefile with host.minikube.internal entry",
			defaultCorefileAfterMinikube121,
			modifiedCorefileAfterMinikube121,
		},
		{
			"default corefile with host.minikube.internal entry, different IP",
			strings.Replace(defaultCorefileAfterMinikube121, "127.0.0.1", differentIp, 1),
			strings.Replace(modifiedCorefileAfterMinikube121, "127.0.0.1", differentIp, 1),
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			ctx := context.Background()
			k8s := fake.NewSimpleClientset()
			createCorefile(t, k8s, tc.input)
			if err := PatchCorefile(ctx, k8s); err != nil {
				t.Errorf("error in PatchCorefile: %v", err)
			}
			if got := readCorefile(t, k8s); got != tc.want {
				t.Errorf(`want readCorefile(t, k8s) = %q, got %q`, tc.want, got)
			}
			// Check that a second patch has no effect.
			if err := PatchCorefile(ctx, k8s); err != nil {
				t.Errorf("error in second PatchCorefile: %v", err)
			}
			if got := readCorefile(t, k8s); got != tc.want {
				t.Errorf(`after second patch, want readCorefile(t, k8s) = %q, got %q`, tc.want, got)
			}
			// Check that reverting undoes the change.
			if err := RevertCorefile(ctx, k8s); err != nil {
				t.Errorf("error in RevertCorefile: %v", err)
			}
			if got := readCorefile(t, k8s); got != tc.input {
				t.Errorf(`after revert, want readCorefile(t, k8s) = %q, got %q`, tc.input, got)
			}
			// Check that a second revert has no effect.
			if err := RevertCorefile(ctx, k8s); err != nil {
				t.Errorf("error in second RevertCorefile: %v", err)
			}
			if got := readCorefile(t, k8s); got != tc.input {
				t.Errorf(`after second revert, want readCorefile(t, k8s) = %q, got %q`, tc.input, got)
			}
		})
	}
}
// TestPatchCorefileUnexpected checks that a Corefile with an unrecognized
// hosts block is rejected and left untouched.
func TestPatchCorefileUnexpected(t *testing.T) {
	ctx := context.Background()
	k8s := fake.NewSimpleClientset()
	createCorefile(t, k8s, defaultCorefileUnexpected)
	err := PatchCorefile(ctx, k8s)
	if err == nil {
		t.Error("PatchCorefile() succeeded with invalid input, wanted error")
	}
	got := readCorefile(t, k8s)
	if got != defaultCorefileUnexpected {
		t.Errorf(`unexpected input should not be modified, want readCorefile(t, k8s) = %q, got %q`, defaultCorefileUnexpected, got)
	}
}
================================================
FILE: src/go/cmd/metadata-server/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main runs a local http server providing details about the connected
// cloud project.
//
// This metadata server replicates a subset of the GKE metadata server
// functionality to provide application default credentials for local services.
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/fsnotify/fsnotify"
"github.com/googlecloudrobotics/ilog"
)
// Command-line flags for the metadata server.
var (
	bindIP      = flag.String("bind_ip", "127.0.0.1", "IPv4 address to listen on")
	port        = flag.Int("port", 80, "Port number to listen on")
	robotIdFile = flag.String("robot_id_file", "", "robot-id.json file")
	sourceCidr  = flag.String("source_cidr", "127.0.0.1/32", "CIDR giving allowed source addresses for token retrieval")
	// Mirror gke behavior and return token with at least 5 minutes of remaining time.
	// https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#applications
	minTokenExpiry = flag.Int("min_token_expiry", 300, "Minimum time a token needs to be valid for in seconds")
	logPeerDetails = flag.Bool("log_peer_details", false, "When enabled details about the peer that requests ADC are logged on the expense of some extra latency")
	logLevel       = flag.Int("log_level", int(slog.LevelInfo), "the log message level required to be logged")
	runningOnGKE   = flag.Bool("running_on_gke", false, "If running on GKE, skip setup steps that are unnecessary and will fail.")
	robotSAName    = flag.String("service_account", "robot-service", "Robot default service account name, default: robot-service")
)
// detectChangesToFile returns a channel that receives a value whenever the
// given file is written to or removed. It terminates the process if the
// fsnotify watcher cannot be created or attached to the file.
func detectChangesToFile(filename string) <-chan struct{} {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		slog.Error("NewWatcher", slog.Any("Error", err))
		os.Exit(1)
	}
	if err := watcher.Add(filename); err != nil {
		slog.Error("Watcher.Add", slog.Any("Error", err))
		os.Exit(1)
	}
	notifications := make(chan struct{})
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					// Event channel closed: stop forwarding.
					return
				}
				slog.Info("event", slog.String("File", filename), slog.Any("Event", event))
				if event.Op&(fsnotify.Write|fsnotify.Remove) != 0 {
					notifications <- struct{}{}
				}
			case watchErr, ok := <-watcher.Errors:
				if !ok {
					return
				}
				slog.Warn("watcher", slog.Any("Error", watchErr))
			}
		}
	}()
	return notifications
}
// main wires up the metadata HTTP handlers, listens on the configured
// address, installs the NAT redirect rule and (outside GKE) patches the
// CoreDNS Corefile, then serves until SIGTERM or a fatal event. On all exit
// paths the NAT rule and Corefile patch are removed again.
func main() {
	flag.Parse()
	logHandler := ilog.NewLogHandler(slog.Level(*logLevel), os.Stderr)
	slog.SetDefault(slog.New(logHandler))
	if ip := net.ParseIP(*bindIP); ip == nil {
		slog.Error("invalid bind_ip flag")
		os.Exit(1)
	}
	ctx := context.Background()
	config, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("Can't create k8s in-cluster config", slog.Any("Error", err))
		os.Exit(1)
	}
	k8s, err := kubernetes.NewForConfig(config)
	if err != nil {
		slog.Error("Can't create k8s client", slog.Any("Error", err))
		os.Exit(1)
	}
	tokenHandler, err := NewTokenHandler(ctx, k8s, *robotSAName)
	if err != nil {
		slog.Error("NewTokenHandler", slog.Any("Error", err))
		os.Exit(1)
	}
	identityHandler, err := NewIdentityHandler(ctx)
	if err != nil {
		slog.Error("NewIdentityHandler", slog.Any("Error", err))
		os.Exit(1)
	}
	http.Handle("/computeMetadata/v1/instance/service-accounts/default/token", tokenHandler)
	http.Handle("/computeMetadata/v1/instance/service-accounts/default/identity", identityHandler)
	serviceAccountHandler := ServiceAccountHandler{}
	http.Handle("/computeMetadata/v1/instance/service-accounts/default/", serviceAccountHandler)
	http.Handle("/computeMetadata/v1/instance/service-accounts/", ConstHandler{[]byte("default/\n")})
	metadataHandler := tokenHandler.NewMetadataHandler(ctx)
	http.Handle("/computeMetadata/v1/", metadataHandler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" {
			slog.Info("Handling root request", slog.String("URL", r.URL.Path))
			w.Header().Set("Metadata-Flavor", "Google")
			w.Header().Set("Content-Type", "application/text")
			w.WriteHeader(http.StatusOK)
			w.Write([]byte("computeMetadata/\n"))
		} else {
			slog.Warn("Unhandled request", slog.String("URL", r.URL.Path), slog.String("Origin", r.RemoteAddr))
			http.NotFound(w, r)
		}
	})
	// Return dummy IP addresses for internal/external IPs. cloudprober
	// crashes if these are not present.
	http.Handle("/computeMetadata/v1/instance/network-interfaces/0/ip", ConstHandler{[]byte("127.0.0.1/\n")})
	http.Handle("/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip", ConstHandler{[]byte("127.0.0.1/\n")})
	// Make sure that the bind is successful before adding the iptables rule as a means of
	// avoiding a race condition where an old metadata-server instance that has not yet fully
	// terminated removes the iptables rule again.
	bindAddress := fmt.Sprintf("%s:%d", *bindIP, *port)
	ln, err := net.Listen("tcp", bindAddress)
	if err != nil {
		slog.Error("failed to create listener", slog.String("Address", bindAddress), slog.Any("Error", err))
		os.Exit(1)
	}
	slog.Info("Listening", slog.String("Address", bindAddress))
	if err := addNATRule(*bindIP, *port); err != nil {
		slog.Error("failed to add iptables rule", slog.Any("Error", err))
		os.Exit(1)
	}
	if !*runningOnGKE {
		if err := PatchCorefile(ctx, k8s); err != nil {
			removeNATRule()
			slog.Error("PatchCorefile", slog.Any("Error", err))
			os.Exit(1)
		}
	}
	go func() {
		err = http.Serve(ln, nil)
		if !*runningOnGKE {
			RevertCorefile(ctx, k8s)
		}
		removeNATRule()
		slog.Error("Serve", slog.Any("Error", err))
		os.Exit(1)
	}()
	go func() {
		<-detectChangesToFile(*robotIdFile)
		if !*runningOnGKE {
			RevertCorefile(ctx, k8s)
		}
		removeNATRule()
		slog.Error("File changed but reloading is not implemented. Crashing...", slog.String("ID", *robotIdFile))
		os.Exit(1)
	}()
	// The channel must be buffered: signal.Notify sends without blocking, so
	// with an unbuffered channel a signal delivered while main is not yet
	// receiving would be dropped (also flagged by `go vet`).
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGTERM)
	<-stop
	if !*runningOnGKE {
		RevertCorefile(ctx, k8s)
	}
	removeNATRule()
}
================================================
FILE: src/go/cmd/metadata-server/main_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"testing"
"time"
)
const (
	// writeTimeout bounds how long the tests below wait for a change
	// notification before giving up.
	writeTimeout = 100 * time.Millisecond
)
// TestDetectsDeletionOfFile verifies that removing a watched file produces a
// change notification within writeTimeout.
func TestDetectsDeletionOfFile(t *testing.T) {
	tmpfile, err := os.CreateTemp("", "tmpfile")
	if err != nil {
		t.Fatal(err)
	}
	if err := tmpfile.Close(); err != nil {
		t.Fatal(err)
	}
	notifications := detectChangesToFile(tmpfile.Name())
	if err := os.Remove(tmpfile.Name()); err != nil {
		t.Fatal(err)
	}
	select {
	case <-notifications:
	case <-time.After(writeTimeout):
		t.Errorf("no change detected after %s", writeTimeout)
	}
}
// TestNoChangeDetectedWhenFileUnchanged verifies that watching an untouched
// file yields no notification within writeTimeout.
func TestNoChangeDetectedWhenFileUnchanged(t *testing.T) {
	tmpfile, err := os.CreateTemp("", "tmpfile")
	if err != nil {
		t.Fatal(err)
	}
	if err := tmpfile.Close(); err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tmpfile.Name())
	notifications := detectChangesToFile(tmpfile.Name())
	select {
	case <-notifications:
		t.Errorf("unexpected change detected after %s", writeTimeout)
	case <-time.After(writeTimeout):
	}
}
================================================
FILE: src/go/cmd/metadata-server/metadata.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file implements the metadata HTTP handlers.
package main
import (
"context"
"encoding/json"
"fmt"
"hash/fnv"
"log/slog"
"net"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cenkalti/backoff"
"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
"github.com/googlecloudrobotics/ilog"
"golang.org/x/oauth2"
cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
	// getPodByIPRetries configures how often we retry determining the IP for a pod.
	getPodByIPRetries = 10
	// getPodByIPWait configures the time to sleep between the retries.
	getPodByIPWait = 500 * time.Millisecond
)

// rateLimitTokenSource is a TokenSource that applies exponential backoff on
// errors, returning the previous error if called again too soon. It doesn't
// rate-limit on success, as it assumes it wraps an oauth2.ReuseTokenSource.
type rateLimitTokenSource struct {
	wrapped oauth2.TokenSource

	mu    sync.Mutex // guards err, delay and next
	err   error
	delay time.Duration
	next  time.Time
}
// newRateLimitTokenSource wraps ts with exponential-backoff rate limiting on
// errors; the backoff state starts reset (no active window).
func newRateLimitTokenSource(ts oauth2.TokenSource) *rateLimitTokenSource {
	rlts := &rateLimitTokenSource{wrapped: ts}
	rlts.resetBackoff()
	return rlts
}

// timeNow is an indirection over time.Now so it can be substituted
// (presumably in tests — confirm against metadata_test.go).
var timeNow = time.Now
// Token tries to fetch a token, unless an error was recently encountered, in
// which case the previous error is returned.
func (s *rateLimitTokenSource) Token() (*oauth2.Token, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Still inside the backoff window: replay the last error.
	if timeNow().Before(s.next) {
		return nil, s.err
	}
	token, err := s.wrapped.Token()
	if err != nil {
		s.updateBackoff(err)
		return nil, err
	}
	s.resetBackoff()
	return token, nil
}
// resetBackoff restores the initial backoff state: a short base delay and no
// active rate-limit window. Callers must hold s.mu.
func (s *rateLimitTokenSource) resetBackoff() {
	s.delay = 100 * time.Millisecond
	s.next = time.Time{}
}

// updateBackoff records err, opens a rate-limit window of the current delay,
// and doubles the delay for subsequent failures until it reaches at least
// 120 seconds. Callers must hold s.mu.
func (s *rateLimitTokenSource) updateBackoff(err error) {
	s.next = timeNow().Add(s.delay)
	s.err = err
	if s.delay < 120*time.Second {
		s.delay *= 2
	}
}
// ConstHandler serves OK responses with static body content.
type ConstHandler struct {
Body []byte
}
func (ch ConstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Metadata-Flavor", "Google")
w.Header().Set("Content-Type", "application/text")
w.WriteHeader(http.StatusOK)
w.Write(ch.Body)
}
// jwtSource abstracts the ability to mint a JWT with a given lifetime; it is
// satisfied by robotauth (see NewIdentityHandler).
type jwtSource interface {
	CreateJWT(context.Context, time.Duration) (string, error)
}

// IdentityHandler serves JWT identity tokens for the robot
type IdentityHandler struct {
	// AllowedSources restricts which source IPs may request tokens.
	AllowedSources *net.IPNet
	robotAuth      jwtSource
}
// NewIdentityHandler constructs an IdentityHandler from the -source_cidr and
// -robot_id_file flags.
func NewIdentityHandler(ctx context.Context) (*IdentityHandler, error) {
	_, allowedSources, err := net.ParseCIDR(*sourceCidr)
	if err != nil {
		return nil, fmt.Errorf("invalid source CIDR %s: %w", *sourceCidr, err)
	}
	ra, err := robotauth.LoadFromFile(*robotIdFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read robot id file %s: %w", *robotIdFile, err)
	}
	return &IdentityHandler{
		AllowedSources: allowedSources,
		robotAuth:      ra,
	}, nil
}
func fromAcceptedIP(w http.ResponseWriter, r *http.Request, allowedSources *net.IPNet) bool {
ipPort := strings.Split(r.RemoteAddr, ":")
if len(ipPort) != 2 {
slog.Error("Unable to obtain IP from remote address", slog.String("Address", r.RemoteAddr))
http.Error(w, "Unable to check authorization", http.StatusInternalServerError)
return false
}
if ip := net.ParseIP(ipPort[0]); ip == nil || !allowedSources.Contains(ip) {
slog.Error("Rejected remote IP", slog.String("IP", ipPort[0]))
http.Error(w, "Access forbidden", http.StatusForbidden)
return false
}
return true
}
// ServeHTTP issues a 15-minute identity JWT to callers from the allowed
// source range; everyone else is rejected by fromAcceptedIP.
func (h *IdentityHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !fromAcceptedIP(w, r, h.AllowedSources) {
		return
	}
	jwt, err := h.robotAuth.CreateJWT(r.Context(), 15*time.Minute)
	if err != nil {
		slog.Error("Unable to create JWT", ilog.Err(err))
		http.Error(w, "Unable to create jwt", http.StatusInternalServerError)
		return
	}
	hdr := w.Header()
	hdr.Set("Metadata-Flavor", "Google")
	hdr.Set("Content-Type", "application/json")
	hdr.Set("Cache-Control", "no-store")
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(jwt))
	slog.Info("Served identity token", slog.String("Address", r.RemoteAddr))
}
// TokenHandler serves access tokens for the associated GCP service-account.
type TokenHandler struct {
	// AllowedSources restricts callers to this IP range; others get 403.
	AllowedSources *net.IPNet
	// TokenSource yields the served tokens; it is rebuilt by
	// updateRobotTokenSource when a token would expire too soon.
	TokenSource oauth2.TokenSource
	// Clock is time.Now in production; tests substitute a fixed clock.
	Clock func() time.Time
	// robotAuth provides project/robot identity and fresh token sources.
	robotAuth auth
	// k8s, when set, is used to map caller IPs to pod names for logging.
	k8s *kubernetes.Clientset
	// saName is the service account to impersonate; empty selects the
	// default (see updateRobotTokenSource).
	saName string
}
// auth abstracts robotauth.RobotAuth so tests can substitute a fake.
type auth interface {
	CreateRobotTokenSource(context.Context, ...string) oauth2.TokenSource
	projectID() string
	robotName() string
}

// rAuth adapts robotauth.RobotAuth to the auth interface.
type rAuth struct {
	robotauth.RobotAuth
}

// projectID returns the GCP project the robot is registered in.
func (a rAuth) projectID() string {
	return a.ProjectId
}

// robotName returns the robot's registered name.
func (a rAuth) robotName() string {
	return a.RobotName
}
// TokenResponse is the JSON document served for token requests, mirroring
// the response shape of the real GCE metadata server.
type TokenResponse struct {
	AccessToken  string `json:"access_token"`
	ExpiresInSec int    `json:"expires_in"`
	TokenType    string `json:"token_type"`
}
// NewTokenHandler builds a TokenHandler from the sourceCidr and robotIdFile
// flag values, loading the robot credentials and priming the token source
// for saName (empty selects the default account).
func NewTokenHandler(ctx context.Context, k8s *kubernetes.Clientset, saName string) (*TokenHandler, error) {
	_, allowedSources, err := net.ParseCIDR(*sourceCidr)
	if err != nil {
		return nil, fmt.Errorf("invalid source CIDR %s: %w", *sourceCidr, err)
	}
	th := &TokenHandler{
		AllowedSources: allowedSources,
		Clock:          time.Now,
		k8s:            k8s,
		saName:         saName,
	}
	if err := th.updateRobotAuth(); err != nil {
		return nil, err
	}
	th.updateRobotTokenSource(ctx)
	return th, nil
}
// updateRobotAuth (re)loads the robot credentials from the robotIdFile flag
// and installs them behind the auth interface.
func (th *TokenHandler) updateRobotAuth() error {
	ra, err := robotauth.LoadFromFile(*robotIdFile)
	if err != nil {
		return fmt.Errorf("failed to read robot id file %s: %w", *robotIdFile, err)
	}
	th.robotAuth = &rAuth{*ra}
	return nil
}
// updateRobotTokenSource rebuilds TokenSource for the configured service
// account. An empty saName defaults to "robot-service", and a bare account
// name is expanded to the full <name>@<project>.iam.gserviceaccount.com
// form before the rate-limited source is created.
func (th *TokenHandler) updateRobotTokenSource(ctx context.Context) {
	sa := th.saName
	if sa == "" {
		sa = "robot-service"
	}
	if !strings.Contains(sa, "@") {
		sa = fmt.Sprintf("%s@%s.iam.gserviceaccount.com", sa, th.robotAuth.projectID())
	}
	th.TokenSource = newRateLimitTokenSource(th.robotAuth.CreateRobotTokenSource(ctx, sa))
}
// NewMetadataHandler creates a MetadataHandler seeded with this robot's
// identity. The numeric project number is not known locally, so it starts
// at 0 and is filled in asynchronously by a background retry loop querying
// the Cloud Resource Manager; until then the handler reports "not ready".
func (th *TokenHandler) NewMetadataHandler(ctx context.Context) *MetadataHandler {
	// Derive a stable pseudo instance id from the robot name.
	idHash := fnv.New64a()
	idHash.Write([]byte(th.robotAuth.robotName()))
	ret := &MetadataHandler{
		ClusterName:   th.robotAuth.robotName(),
		ProjectId:     th.robotAuth.projectID(),
		ProjectNumber: 0,
		RobotName:     th.robotAuth.robotName(),
		InstanceId:    idHash.Sum64(),
		// This needs to be an actual Cloud zone so that it can be mapped
		// to a Monarch/Stackdriver region. TODO(swolter): We should make
		// this zone configurable to avoid confusing users.
		Zone: "europe-west1-c",
	}
	// Retry with a constant 5s backoff until the project number resolves.
	// The store is atomic because MetadataHandler.ServeHTTP reads
	// ProjectNumber concurrently via atomic.LoadInt64.
	go backoff.Retry(
		func() error {
			projectNumber, err := getProjectNumber(oauth2.NewClient(ctx, th.TokenSource), th.robotAuth.projectID())
			if err != nil {
				slog.Info("will retry to obtain project number", slog.String("Project", th.robotAuth.projectID()), slog.Any("Error", err))
				return err
			}
			atomic.StoreInt64(&ret.ProjectNumber, projectNumber)
			return nil
		},
		backoff.NewConstantBackOff(5*time.Second),
	)
	return ret
}
// ServeHTTP returns an access token for the configured GCP service account
// (default 'robot-service') in the same JSON shape the GCE metadata server
// uses.
// The query might also contain a 'scopes' query param, which we currently don't handle
// (e.g.: scopes=https://www.googleapis.com/auth/devstorage.full_control,https://www.googleapis.com/auth/cloud-platform HTTP/1.1)
func (th *TokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !fromAcceptedIP(w, r, th.AllowedSources) {
		return
	}
	token, err := th.TokenSource.Token()
	if err != nil {
		slog.Error("Token retrieval error", slog.Any("Error", err))
		http.Error(w, fmt.Sprintf("Token retrieval failed: %v", err), http.StatusInternalServerError)
		return
	}
	now := th.Clock()
	expiresInSec := int(token.Expiry.Sub(now).Seconds())
	// fluent-bit expects expires_in - 10% > 60 seconds
	if expiresInSec < *minTokenExpiry {
		// The cached token expires too soon: rebuild the token source to
		// force a fresh token and retry once.
		th.updateRobotTokenSource(r.Context())
		token, err = th.TokenSource.Token()
		if err != nil {
			slog.Error("Token retrieval error", slog.Any("Error", err))
			http.Error(w, fmt.Sprintf("Token retrieval failed: %v", err), http.StatusInternalServerError)
			return
		}
		expiresInSec = int(token.Expiry.Sub(now).Seconds())
	}
	tokenResponse := TokenResponse{
		AccessToken:  token.AccessToken,
		ExpiresInSec: expiresInSec,
		TokenType:    token.TokenType,
	}
	body, err := json.Marshal(tokenResponse)
	if err != nil {
		slog.Error("Token serialization error", slog.Any("Error", err))
		http.Error(w, fmt.Sprintf("Token serialization failed: %v", err), http.StatusInternalServerError)
		return
	}
	// Collect some extra data for diagnostics (aka which services rely on ADCs).
	// User-Agent: "Fluent-Bit", "gcloud-golang/0.1"
	ua := r.Header.Get("User-Agent")
	// Use net.SplitHostPort (not strings.Split on ":") so IPv6 remote
	// addresses like "[::1]:8001" yield the correct host part.
	host, _, splitErr := net.SplitHostPort(r.RemoteAddr)
	if splitErr != nil {
		host = r.RemoteAddr
	}
	pod := th.getPodNameByIP(r.Context(), host)
	w.Header().Set("Metadata-Flavor", "Google")
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	w.WriteHeader(http.StatusOK)
	w.Write(body)
	slog.Info("Served access token", slog.String("Address", r.RemoteAddr), slog.String("UA", ua), slog.String("Pod", pod))
}
// getPodNameByIP best-effort resolves a caller IP to "namespace/pod" for
// diagnostic logging. It returns "" when lookups are disabled (no k8s
// client, or logPeerDetails off) or when no pod matches. Listing failures
// are logged but not fatal.
func (th *TokenHandler) getPodNameByIP(ctx context.Context, ip string) string {
	if th.k8s == nil || !*logPeerDetails {
		// TODO(ensonic): need to add a k8s fake to the tests
		return ""
	}
	// Measure the time it takes to obtain the extra information
	defer func(start time.Time) {
		slog.Info("getPodNameByIP()", slog.Duration("Duration", time.Since(start)))
	}(time.Now())
	// TODO(ensonic): to avoid traversing all ns/pods each time we can
	// - cache ip->ns/pod mapping
	// - check first if we can still get a pod by these keys and if the IP still matches
	// - do the listing otherwise
	// TODO(ensonic): consider labeling namespaces that participate in ADCs. This will speedup the lookups
	// and allows us to lock this down.
	podsToRetry := []corev1.Pod{}
	// NOTE(review): the error paths below keep going; this assumes client-go
	// returns a non-nil empty list/object alongside the error (as generated
	// clients do) — confirm, otherwise the derefs would panic.
	nss, err := th.k8s.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		slog.Error("Failed to list namespaces", slog.Any("Error", err))
	}
	for _, ns := range nss.Items {
		nsName := ns.ObjectMeta.Name
		pods, err := th.k8s.CoreV1().Pods(nsName).List(ctx, metav1.ListOptions{})
		if err != nil {
			slog.Error("Failed to list pods", slog.String("Namespace", nsName), slog.Any("Error", err))
		}
		for _, pod := range pods.Items {
			if pod.Status.PodIP == ip {
				return nsName + "/" + pod.Name
			}
			// Pods without an IP yet may be the caller; remember them for
			// the retry loop below.
			if pod.Status.PodIP == "" {
				slog.Warn("Pod has no ip (yet)", slog.String("Pod", pod.Name), slog.String("Message", pod.Status.Message))
				podsToRetry = append(podsToRetry, pod)
			}
		}
	}
	// We don't have the resource version from the pod creation (to be used in the ListOptions above). Hence
	// we need to do this retry logic for cases where a pod just started and right away asked for an ADC.
	retries := getPodByIPRetries
	for len(podsToRetry) > 0 && retries > 0 {
		time.Sleep(getPodByIPWait)
		slog.Info("Retrying pods without ip", slog.Int("Count", len(podsToRetry)))
		ptr := podsToRetry
		podsToRetry = []corev1.Pod{}
		for _, p := range ptr {
			nsName := p.ObjectMeta.Namespace
			podName := p.ObjectMeta.Name
			pod, err := th.k8s.CoreV1().Pods(nsName).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				slog.Error("Failed to get pod", slog.String("Pod", podName), slog.String("Namespace", nsName), slog.Any("Error", err))
			}
			if pod.Status.PodIP == ip {
				return nsName + "/" + pod.Name
			}
			if pod.Status.PodIP == "" {
				slog.Info("Pod has no ip (yet)", slog.String("Pod", pod.Name), slog.String("Message", pod.Status.Message))
				podsToRetry = append(podsToRetry, *pod)
			}
		}
		retries--
	}
	slog.Info("No pod found", slog.String("IP", ip))
	return ""
}
// ServiceAccountHandler serves information about the default service account.
type ServiceAccountHandler struct {
}

// ServiceAccountResponse is the JSON document served for service-account
// queries; this implementation always returns a minimal fixed document.
type ServiceAccountResponse struct {
	Aliases []string `json:"aliases"`
	Email   string   `json:"email"`
	Scopes  []string `json:"scopes"`
}
// ServeHTTP answers every service-account query with the same minimal
// document: the "default" email, no aliases, no scopes.
func (sh ServiceAccountHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	resp := ServiceAccountResponse{
		Aliases: []string{},
		Email:   "default",
		Scopes:  []string{},
	}
	body, err := json.Marshal(resp)
	if err != nil {
		slog.Error("ServiceAccountResponse serialization error", slog.Any("Error", err))
		http.Error(w, fmt.Sprintf("ServiceAccountResponse serialization failed: %v", err), http.StatusInternalServerError)
		return
	}
	hdr := w.Header()
	hdr.Set("Metadata-Flavor", "Google")
	hdr.Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(body)
	slog.Info("Responded to service-account request", slog.String("Origin", r.RemoteAddr), slog.String("URL", r.URL.Path))
}
// MetadataHandler serves generic instance metadata.
type MetadataHandler struct {
	ClusterName string
	ProjectId   string
	// ProjectNumber is written asynchronously via atomic.StoreInt64 (see
	// NewMetadataHandler) and read via atomic.LoadInt64; it must stay int64
	// and be accessed atomically.
	ProjectNumber int64
	RobotName     string
	// InstanceId is an FNV-64a hash of the robot name (a stable fake id).
	InstanceId uint64
	Zone       string
}
// ServeHTTP answers /computeMetadata/v1/* lookups from a fixed key/value
// map. It returns 500 until the asynchronous project-number lookup has
// completed (see NewMetadataHandler) and 404 for unknown keys.
//
// NOTE(review): the value receiver copies the struct on each request; the
// copy is a plain read of ProjectNumber racing with the atomic store in
// NewMetadataHandler's goroutine. A pointer receiver would avoid this —
// confirm before changing, since it alters which type satisfies
// http.Handler.
func (mh MetadataHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Metadata-Flavor", "Google")
	w.Header().Set("Content-Type", "application/text")
	projectNumber := atomic.LoadInt64(&mh.ProjectNumber)
	if projectNumber == 0 {
		// The background lookup has not finished; refuse to serve partial
		// metadata rather than returning zero values.
		slog.Warn("Metadata endpoint was requested before it was ready")
		http.Error(w, "Metadata endpoint not ready yet", http.StatusInternalServerError)
		return
	}
	metadata := map[string]string{
		"project/project-id":                   mh.ProjectId,
		"project/numeric-project-id":           fmt.Sprintf("%d", projectNumber),
		"instance/hostname":                    fmt.Sprintf("robot-%s", mh.RobotName),
		"instance/id":                          fmt.Sprintf("%d", mh.InstanceId),
		"instance/zone":                        fmt.Sprintf("projects/%d/zones/%s", projectNumber, mh.Zone),
		"instance/attributes/":                 "kube-env\ncluster-name\ncluster-location\n",
		"instance/attributes/kube-env":         fmt.Sprintf("CLUSTER_NAME: %s\n", mh.ClusterName),
		"instance/attributes/cluster-name":     mh.ClusterName,
		"instance/attributes/cluster-location": mh.Zone,
	}
	key := strings.TrimPrefix(r.URL.Path, "/computeMetadata/v1/")
	value := metadata[key]
	if value == "" {
		slog.Warn("No key found", slog.String("URL", r.URL.Path))
		http.NotFound(w, r)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(value))
	slog.Info("Responded to metadata request", slog.String("URL", r.URL.Path), slog.String("Value", value))
}
// getProjectNumber resolves a GCP project ID (e.g. "my-project") to its
// numeric project number via the Cloud Resource Manager API, using the
// given authenticated HTTP client.
func getProjectNumber(client *http.Client, projectId string) (int64, error) {
	crm, err := cloudresourcemanager.New(client)
	if err != nil {
		return 0, err
	}
	project, err := crm.Projects.Get(projectId).Do()
	if err != nil {
		return 0, err
	}
	return project.ProjectNumber, nil
}
================================================
FILE: src/go/cmd/metadata-server/metadata_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"io"
"log/slog"
"net"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"golang.org/x/oauth2"
)
func bodyOrDie(r *http.Response) string {
body, err := io.ReadAll(r.Body)
if err != nil {
slog.Error("Failed to read body stream")
os.Exit(1)
}
return string(body)
}
// TestConstHandler verifies that ConstHandler serves its fixed body with a
// 200 status.
func TestConstHandler(t *testing.T) {
	req := httptest.NewRequest("GET", "/url", strings.NewReader("body"))
	rec := httptest.NewRecorder()
	ConstHandler{[]byte("response")}.ServeHTTP(rec, req)
	resp := rec.Result()
	if got, want := resp.StatusCode, 200; got != want {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	if got, want := bodyOrDie(resp), "response"; got != want {
		t.Errorf("Wrong response body; want %s; got %s", want, got)
	}
}
// fakeJWTSource is a jwtSource test double: it records the requested
// validity duration and always returns the fixed token val.
type fakeJWTSource struct {
	val string        // JWT string returned by CreateJWT
	d   time.Duration // last requested validity duration
}

// CreateJWT records d and returns the canned value with no error.
func (s *fakeJWTSource) CreateJWT(_ context.Context, d time.Duration) (string, error) {
	s.d = d
	return s.val, nil
}
// TestIdentityHandlerServeHTTP covers the allowed-source happy path and the
// rejection of callers outside the allowed CIDR.
func TestIdentityHandlerServeHTTP(t *testing.T) {
	t.Parallel()
	h := IdentityHandler{
		// Keyed fields keep `go vet`'s composite-literal check happy.
		AllowedSources: &net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.CIDRMask(24, 32)},
		robotAuth:      &fakeJWTSource{val: "value"},
	}
	t.Run("simple", func(t *testing.T) {
		t.Parallel()
		req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/service-accounts/default/identity", nil)
		req.RemoteAddr = "192.168.0.101:8001"
		respRecorder := httptest.NewRecorder()
		h.ServeHTTP(respRecorder, req)
		if want, got := http.StatusOK, respRecorder.Result().StatusCode; want != got {
			t.Errorf("Wrong response code; want %d; got %d", want, got)
		}
		if want, got := "value", bodyOrDie(respRecorder.Result()); want != got {
			t.Errorf("Wrong response body; want %s; got %s", want, got)
		}
	})
	t.Run("outside-addr", func(t *testing.T) {
		t.Parallel()
		req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/service-accounts/default/identity", nil)
		req.RemoteAddr = "192.168.1.101:8001"
		respRecorder := httptest.NewRecorder()
		h.ServeHTTP(respRecorder, req)
		if want, got := http.StatusForbidden, respRecorder.Result().StatusCode; want != got {
			t.Errorf("Wrong response code; want %d; got %d", want, got)
		}
	})
}
// TestTokenHandlerServesToken checks the happy path: an allowed caller
// receives the cached token together with its remaining lifetime.
func TestTokenHandlerServesToken(t *testing.T) {
	// Lower the expiry threshold so the 10s token counts as fresh.
	oldMinTokenExpiry := *minTokenExpiry
	*minTokenExpiry = 1
	t.Cleanup(func() { *minTokenExpiry = oldMinTokenExpiry })
	testTime := time.Unix(1531319123, 0)
	req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/service-accounts/default/token", strings.NewReader("body"))
	req.RemoteAddr = "192.168.0.101:8001"
	respRecorder := httptest.NewRecorder()
	th := TokenHandler{
		// Keyed fields keep `go vet`'s composite-literal check happy.
		AllowedSources: &net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.CIDRMask(24, 32)},
		TokenSource:    oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "mytoken", Expiry: testTime.Add(10 * time.Second), TokenType: "Bearer"}),
		Clock:          func() time.Time { return testTime },
	}
	th.ServeHTTP(respRecorder, req)
	if want, got := 200, respRecorder.Result().StatusCode; want != got {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	if want, got := "{\"access_token\":\"mytoken\",\"expires_in\":10,\"token_type\":\"Bearer\"}", bodyOrDie(respRecorder.Result()); want != got {
		t.Errorf("Wrong response body; want %s; got %s", want, got)
	}
}
// fakeRobotAuth is a test double for the auth interface with canned values.
type fakeRobotAuth struct {
	ts   oauth2.TokenSource // returned by CreateRobotTokenSource
	id   string             // project ID
	name string             // robot name
}

// CreateRobotTokenSource ignores its arguments and returns the canned source.
func (a *fakeRobotAuth) CreateRobotTokenSource(context.Context, ...string) oauth2.TokenSource {
	return a.ts
}
func (a *fakeRobotAuth) projectID() string {
	return a.id
}
func (a *fakeRobotAuth) robotName() string {
	return a.name
}
// TestTokenHandlerServesLastingToken checks the refresh path: when the
// cached token expires sooner than minTokenExpiry, the handler rebuilds its
// token source (via robotAuth) and serves the longer-lived token.
func TestTokenHandlerServesLastingToken(t *testing.T) {
	oldMinTokenExpiry := *minTokenExpiry
	*minTokenExpiry = 300
	t.Cleanup(func() { *minTokenExpiry = oldMinTokenExpiry })
	testTime := time.Unix(1531319123, 0)
	req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/service-accounts/default/token", strings.NewReader("body"))
	req.RemoteAddr = "192.168.0.101:8001"
	respRecorder := httptest.NewRecorder()
	th := TokenHandler{
		// Keyed fields keep `go vet`'s composite-literal check happy.
		AllowedSources: &net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.CIDRMask(24, 32)},
		TokenSource:    oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "mytoken", Expiry: testTime.Add(10 * time.Second), TokenType: "Bearer"}),
		Clock:          func() time.Time { return testTime },
		robotAuth: &fakeRobotAuth{
			ts: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "mytoken", Expiry: testTime.Add(1000 * time.Second), TokenType: "Bearer"}),
		},
	}
	th.ServeHTTP(respRecorder, req)
	if want, got := 200, respRecorder.Result().StatusCode; want != got {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	if want, got := "{\"access_token\":\"mytoken\",\"expires_in\":1000,\"token_type\":\"Bearer\"}", bodyOrDie(respRecorder.Result()); want != got {
		t.Errorf("Wrong response body; want %s; got %s", want, got)
	}
}
// TestTokenHandlerDeniesWrongAddress verifies that callers outside the
// allowed CIDR receive 403 and no token.
func TestTokenHandlerDeniesWrongAddress(t *testing.T) {
	req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/service-accounts/default/token", strings.NewReader("body"))
	req.RemoteAddr = "192.168.1.101:8001"
	respRecorder := httptest.NewRecorder()
	th := TokenHandler{
		// Keyed fields keep `go vet`'s composite-literal check happy.
		AllowedSources: &net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.CIDRMask(24, 32)},
		TokenSource:    oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "mytoken"}),
	}
	th.ServeHTTP(respRecorder, req)
	if want, got := 403, respRecorder.Result().StatusCode; want != got {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
}
// TestServiceAccountHandlerReturnsMinimalJSON verifies the fixed document
// served for service-account queries.
func TestServiceAccountHandlerReturnsMinimalJSON(t *testing.T) {
	req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/service-accounts/default/?recursive=true", strings.NewReader("body"))
	req.RemoteAddr = "192.168.1.101:8001"
	rec := httptest.NewRecorder()
	ServiceAccountHandler{}.ServeHTTP(rec, req)
	resp := rec.Result()
	if got, want := resp.StatusCode, 200; got != want {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	if got, want := bodyOrDie(resp), "{\"aliases\":[],\"email\":\"default\",\"scopes\":[]}"; got != want {
		t.Errorf("Wrong response body; want %s; got %s", want, got)
	}
}
// TestMetadataHandlerReturnsZone verifies the instance/zone lookup once the
// project number is populated.
func TestMetadataHandlerReturnsZone(t *testing.T) {
	req := httptest.NewRequest("GET", "/computeMetadata/v1/instance/zone", strings.NewReader("body"))
	rec := httptest.NewRecorder()
	mh := MetadataHandler{
		ClusterName:   "28",
		ProjectId:     "foo",
		ProjectNumber: 512,
		RobotName:     "28",
		Zone:          "edge",
	}
	mh.ServeHTTP(rec, req)
	resp := rec.Result()
	if got, want := resp.StatusCode, 200; got != want {
		t.Errorf("Wrong response code; want %d; got %d", want, got)
	}
	if got, want := bodyOrDie(resp), "projects/512/zones/edge"; got != want {
		t.Errorf("Wrong response body; want %s; got %s", want, got)
	}
}
// errToken is the sentinel error fakeTokenSource returns while it is still
// in its error phase.
var errToken = errors.New("failed to get token")

// fakeTokenSource returns `Errors` consecutive errors then returns tokens.
// Calls counts the number of calls so far.
type fakeTokenSource struct {
	Calls  int
	Errors int
}

// Token fails for the first Errors calls and succeeds afterwards.
func (s *fakeTokenSource) Token() (*oauth2.Token, error) {
	s.Calls++
	if s.Calls <= s.Errors {
		return nil, errToken
	}
	return &oauth2.Token{}, nil
}
// TestRateLimitTokenSource drives the rate-limited source with a fake clock
// (advanced in 100ms steps) and checks that after N consecutive errors the
// source succeeds within an expected overall-duration window, i.e. the
// exponential backoff neither gives up too early nor waits too long.
func TestRateLimitTokenSource(t *testing.T) {
	oldTimeNow := timeNow
	t.Cleanup(func() {
		timeNow = oldTimeNow
	})
	// Test that we retry a certain number of errors within a given amount of
	// time, then succeed. Since we use 100ms steps (not real time), maxTime
	// should not be too large or the test may get slow: One hour can be
	// "simulated" in a few ms, a year takes ~15s.
	tests := []struct {
		desc   string
		errors int
		// Acceptable range of overall duration (too lazy to do the maths to
		// work out exactly how long it should wait).
		minTime time.Duration
		maxTime time.Duration
	}{
		{
			desc:    "no errors",
			errors:  0,
			minTime: 0,
			maxTime: 0,
		},
		{
			desc:    "single error retried within 0.5s",
			errors:  1,
			minTime: 100 * time.Millisecond,
			maxTime: 500 * time.Millisecond,
		},
		{
			desc:    "two errors retried within 1s",
			errors:  2,
			minTime: 200 * time.Millisecond,
			maxTime: time.Second,
		},
		{
			desc:    "20 errors retried within 1h",
			errors:  20,
			minTime: 15 * time.Minute,
			maxTime: time.Hour,
		},
	}
	startTime := time.Time{}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			// Install a fake clock; the loop below advances it manually.
			clock := startTime
			timeNow = func() time.Time { return clock }
			fakeTS := &fakeTokenSource{Errors: tc.errors}
			ts := newRateLimitTokenSource(fakeTS)
			// Poll Token() every simulated 100ms until it succeeds or the
			// acceptable window is exceeded.
			for ; clock.Sub(startTime) <= tc.maxTime; clock = clock.Add(100 * time.Millisecond) {
				_, err := ts.Token()
				if err != nil {
					continue
				}
				break
			}
			duration := clock.Sub(startTime)
			if duration < tc.minTime {
				t.Errorf("Token() succeeded within %s, want at least %s", duration, tc.minTime)
			}
			if duration > tc.maxTime {
				t.Errorf("Token() did not succeed within %s", tc.maxTime)
			}
		})
	}
}
================================================
FILE: src/go/cmd/metadata-server/nftables.go
================================================
// Copyright 2025 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"encoding/binary"
"fmt"
"log"
"net"
"github.com/google/nftables"
"github.com/google/nftables/expr"
)
// metadataRule is the UserData marker attached to our NAT rule so that
// removeNATRule can later find and delete exactly the rule we installed.
var metadataRule = []byte("metadata-nat")

// addNATRule installs an nftables DNAT rule in the IPv4 "nat" table that
// redirects TCP traffic addressed to the GCE metadata server
// (169.254.169.254:80) to listenIP:listenPort.
func addNATRule(listenIP string, listenPort int) error {
	con, err := nftables.New()
	if err != nil {
		return fmt.Errorf("nftables new: %v", err)
	}
	table := con.AddTable(&nftables.Table{
		Name:   "nat",
		Family: nftables.TableFamilyIPv4,
	})
	accept := nftables.ChainPolicyAccept
	prerouting := con.AddChain(&nftables.Chain{
		Name:     "PREROUTING",
		Table:    table,
		Hooknum:  nftables.ChainHookPrerouting,
		Priority: nftables.ChainPriorityNATDest,
		Type:     nftables.ChainTypeNAT,
		Policy:   &accept,
	})
	destinationPort := make([]byte, 2)
	binary.BigEndian.PutUint16(destinationPort, uint16(listenPort))
	// net.ParseIP also accepts IPv6, so additionally require a 4-byte form
	// to match the "valid IPv4 address" contract of the error below.
	destinationIP := net.ParseIP(listenIP)
	if destinationIP == nil || destinationIP.To4() == nil {
		return fmt.Errorf("%s is not a valid IPv4 address", listenIP)
	}
	e := []expr.Any{
		// Load network IP into register 1
		&expr.Payload{
			OperationType:  expr.PayloadLoad,
			DestRegister:   1,
			SourceRegister: 0,
			Base:           expr.PayloadBaseNetworkHeader,
			Offset:         16,
			Len:            4,
			CsumType:       expr.CsumTypeNone,
			//CsumOffset: 0,
			//CsumFlags: 0,
		},
		// Match register 1 with metadata server IP
		&expr.Cmp{
			Op:       expr.CmpOpEq,
			Register: 1,
			Data:     net.IPv4(169, 254, 169, 254).To4(),
		},
		// Load transport layer protocol into register 1
		&expr.Meta{
			Key:            expr.MetaKeyL4PROTO,
			SourceRegister: false,
			Register:       1,
		},
		// Match transport layer protocol with TCP
		&expr.Cmp{
			Op:       expr.CmpOpEq,
			Register: 1,
			Data:     []byte{6}, // TCP
		},
		// Load network port into register 1
		&expr.Payload{
			OperationType:  expr.PayloadLoad,
			DestRegister:   1,
			SourceRegister: 0,
			Base:           expr.PayloadBaseTransportHeader,
			Offset:         2,
			Len:            2,
			CsumType:       expr.CsumTypeNone,
			//CsumOffset: 0,
			//CsumFlags: 0,
		},
		// Match register 1 with port 80
		&expr.Cmp{
			Op:       expr.CmpOpEq,
			Register: 1,
			Data:     []byte{0, 80},
		},
		// Adding a counter helps debugging
		&expr.Counter{
			Bytes:   0,
			Packets: 0,
		},
		// Place destination IP in register 1. This was previously
		// hard-coded to 127.0.0.1 even though listenIP was parsed and
		// validated above; use the caller-provided address instead.
		&expr.Immediate{
			Register: 1,
			Data:     destinationIP.To4(),
		},
		// Place destination port in register 2
		&expr.Immediate{
			Register: 2,
			Data:     destinationPort,
		},
		&expr.NAT{
			Type:        1,
			Family:      2,
			RegAddrMin:  1,
			RegAddrMax:  1,
			RegProtoMin: 2,
			RegProtoMax: 2,
			Random:      false,
			FullyRandom: false,
			Persistent:  false,
		},
	}
	con.AddRule(&nftables.Rule{
		Table: table,
		Chain: prerouting,
		// Flags:
		Exprs:    e,
		UserData: metadataRule,
	})
	if err := con.Flush(); err != nil {
		return fmt.Errorf("nftables flush: %v", err)
	}
	return nil
}
// removeNATRule best-effort deletes the metadata NAT rule(s) previously
// installed by addNATRule, identified by their UserData marker. Failures
// are logged as warnings; the function now returns early on errors instead
// of dereferencing a nil connection or flushing after a failed listing.
func removeNATRule() {
	con, err := nftables.New()
	if err != nil {
		log.Printf("Warning: nftables invocation failed: %v", err)
		// con is nil here; continuing would panic.
		return
	}
	table := con.AddTable(&nftables.Table{
		Name:   "nat",
		Family: nftables.TableFamilyIPv4,
	})
	accept := nftables.ChainPolicyAccept
	prerouting := con.AddChain(&nftables.Chain{
		Name:     "PREROUTING",
		Table:    table,
		Hooknum:  nftables.ChainHookPrerouting,
		Priority: nftables.ChainPriorityNATDest,
		Type:     nftables.ChainTypeNAT,
		Policy:   &accept,
	})
	rs, err := con.GetRules(table, prerouting)
	if err != nil {
		log.Printf("Warning: nftables invocation failed: %v", err)
		return
	}
	for _, r := range rs {
		if bytes.Equal(r.UserData, metadataRule) {
			con.DelRule(r)
		}
	}
	if err := con.Flush(); err != nil {
		log.Printf("Warning: nftables invocation failed: %v", err)
	}
}
================================================
FILE: src/go/cmd/setup-dev/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
# Library for the setup-dev command-line tool (main.go), which connects a
# developer workstation to a robot via the Kubernetes relay.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/setup-dev",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/configutil:go_default_library",
        "//src/go/pkg/kubeutils:go_default_library",
        "//src/go/pkg/robotauth:go_default_library",
        "//src/go/pkg/setup:go_default_library",
        "//src/go/pkg/setup/util:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_client_go//dynamic:go_default_library",
        "@io_k8s_client_go//tools/clientcmd:go_default_library",
        "@io_k8s_client_go//tools/clientcmd/api:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
        "@org_golang_x_oauth2//google:go_default_library",
    ],
)

# The setup-dev binary; see setup-dev.md for usage.
go_binary(
    name = "setup-dev",
    embed = [":go_default_library"],
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/setup-dev/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"errors"
"flag"
"fmt"
"log/slog"
"net/http"
"os"
"os/exec"
"regexp"
"time"
"github.com/cenkalti/backoff"
"github.com/googlecloudrobotics/core/src/go/pkg/configutil"
"github.com/googlecloudrobotics/core/src/go/pkg/kubeutils"
"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
"github.com/googlecloudrobotics/core/src/go/pkg/setup"
"github.com/googlecloudrobotics/core/src/go/pkg/setup/util"
"github.com/googlecloudrobotics/ilog"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
clientapi "k8s.io/client-go/tools/clientcmd/api"
)
// robotPrefix marks identifiers generated for developer workstations (see
// makeIdentifier).
const robotPrefix = "dev-"

// Command-line flags.
var (
	project   = flag.String("project", "", "Project ID for the Google Cloud Platform")
	robotName = flag.String("robot-name", "", "Robot name (default: select interactively)")
)
// parseFlags parses the command line and exits with an error message when
// the mandatory --project flag is missing.
func parseFlags() {
	flag.Parse()
	if *project != "" {
		return
	}
	fmt.Println("ERROR: --project not specified")
	os.Exit(1)
}
// main wires up the setup-dev workflow: read the cloud project config,
// resolve the target robot, write a kubectl context for the relay, and
// publish developer credentials to the cloud.
func main() {
	parseFlags()
	logHandler := ilog.NewLogHandler(slog.LevelInfo, os.Stderr)
	slog.SetDefault(slog.New(logHandler))
	ctx := context.Background()
	f := &util.DefaultFactory{}
	vars, err := configutil.ReadConfig(*project)
	if err != nil {
		slog.Error("Failed to read config", ilog.Err(err))
		os.Exit(1)
	}
	// Fall back to the default cloud endpoints domain when none is
	// configured for the project.
	domain, ok := vars["CLOUD_ROBOTICS_DOMAIN"]
	if !ok || domain == "" {
		domain = fmt.Sprintf("www.endpoints.%s.cloud.goog", *project)
	}
	tokenSource, err := google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		slog.Error("Failed to create OAuth2 token source", ilog.Err(err))
		os.Exit(1)
	}
	k8sCfg := kubeutils.BuildCloudKubernetesConfig(tokenSource, domain)
	k8s, err := dynamic.NewForConfig(k8sCfg)
	if err != nil {
		slog.Error("Failed to create kubernetes client", ilog.Err(err))
		os.Exit(1)
	}
	robotGVR := schema.GroupVersionResource{Group: "registry.cloudrobotics.com", Version: "v1alpha1", Resource: "robots"}
	robotClient := k8s.Resource(robotGVR).Namespace("default")
	// May prompt interactively when --robot-name was not given.
	*robotName, err = setup.GetRobotName(ctx, f, robotClient, *robotName)
	if err != nil {
		slog.Error("Failed to get robot name", ilog.Err(err))
		os.Exit(1)
	}
	if err := createKubeRelayEntry(*project, domain, *robotName); err != nil {
		slog.Error("Failed to create kubectl context", ilog.Err(err))
		os.Exit(1)
	}
	// TODO(ensonic): these are only used for the ssh-app
	// dev credentials are always created
	client := oauth2.NewClient(context.Background(), tokenSource)
	if err := setupDevCredentials(client, domain, *robotName); err != nil {
		slog.Error("Failed to set up credentials", ilog.Err(err))
		os.Exit(1)
	}
	slog.Info("Setup complete.")
}
// createKubeRelayEntry writes cluster configuration to ~/.kube/config,
// pointing to the kubernetes-relay-server running on GKE and linked to the
// given robot.
func createKubeRelayEntry(projectID string, domain string, robotName string) error {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	cfg, err := loadingRules.Load()
	if err != nil {
		return err
	}
	// Make sure a GCP auth entry exists; all robot contexts share it.
	if cfg.AuthInfos["cloud-robotics-gcp"] == nil {
		cfg.AuthInfos["cloud-robotics-gcp"] = &clientapi.AuthInfo{
			AuthProvider: &clientapi.AuthProviderConfig{Name: "gcp"},
		}
	}
	contextName := fmt.Sprintf("%s-robot", projectID)
	server := fmt.Sprintf("https://%s/apis/core.kubernetes-relay/client/%s", domain, robotName)
	if cfg.Clusters[contextName] == nil {
		cfg.Clusters[contextName] = &clientapi.Cluster{}
	}
	// Always overwrite the URL because it encodes the robot we're using.
	cfg.Clusters[contextName].Server = server
	if cfg.Contexts[contextName] == nil {
		cfg.Contexts[contextName] = &clientapi.Context{
			AuthInfo:  "cloud-robotics-gcp",
			Cluster:   contextName,
			Namespace: "default",
		}
	}
	if err := clientcmd.WriteToFile(*cfg, loadingRules.GetDefaultFilename()); err != nil {
		return err
	}
	fmt.Printf("Robot context created, use with: kubectl --context %s\n", contextName)
	return nil
}
// setupDevCredentials generates a workstation ID for use with the Token Vendor then
// calls in to CreateAndPublishCredentialsToCloud to create and publish a private key.
// The key is also stored locally via StoreInFile for later use.
func setupDevCredentials(client *http.Client, domain string, robotName string) error {
	hostname, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("Failed to query hostname: %v", err)
	}
	auth := &robotauth.RobotAuth{
		RobotName: robotName,
		ProjectId: *project,
		Domain:    domain,
		// The hostname-derived ID distinguishes this workstation's key in
		// the public key registry.
		PublicKeyRegistryId: makeIdentifier(hostname),
	}
	slog.Info("Creating new private key")
	if err := auth.CreatePrivateKey(); err != nil {
		return err
	}
	if err := setup.PublishCredentialsToCloud(client, auth /*retries*/, 1); err != nil {
		return err
	}
	if err := auth.StoreInFile(); err != nil {
		return fmt.Errorf("Failed to store private key: %v", err)
	}
	return nil
}
// invalidIdentChars matches runs of characters that are not allowed in a
// robot identifier.
var invalidIdentChars = regexp.MustCompile("[^a-zA-Z0-9_.~+%-]+")

// makeIdentifier converts a string to a valid robot identifier by adding a prefix
// and removing invalid characters. The pattern is compiled once at package
// init (MustCompile) instead of on every call.
func makeIdentifier(base string) string {
	return robotPrefix + invalidIdentChars.ReplaceAllString(base, "")
}
// containerExists reports whether `docker inspect` can find the named
// container. A "No such object" failure is treated as a clean "false"; any
// other docker failure is surfaced as an error including docker's output.
func containerExists(container string) (bool, error) {
	output, err := exec.Command("docker", "inspect", container).CombinedOutput()
	if err == nil {
		return true, nil
	}
	if bytes.HasPrefix(output, []byte("Error: No such object")) {
		return false, nil
	}
	return false, fmt.Errorf("`docker inspect %s` failed (%v): %s", container, err, output)
}
// stopContainerIfNeeded stops a container if it exists, then waits for it to
// be automatically deleted. It assumes the container was run with --rm.
func stopContainerIfNeeded(container string) error {
	exists, err := containerExists(container)
	if err != nil {
		return err
	}
	if !exists {
		return nil
	}
	slog.Info("Stopping container", slog.String("Container", container))
	stop := exec.Command("docker", "stop", container)
	stop.Stderr = os.Stderr
	if err := stop.Run(); err != nil {
		return fmt.Errorf("`docker stop %s` failed: %v", container, err)
	}
	// Wait for the container to be deleted.
	waitGone := func() error {
		stillExists, err := containerExists(container)
		if err != nil {
			return backoff.Permanent(err)
		}
		if stillExists {
			return errors.New("container exists")
		}
		return nil
	}
	return backoff.Retry(waitGone, backoff.NewConstantBackOff(100*time.Millisecond))
}
================================================
FILE: src/go/cmd/setup-dev/setup-dev.md
================================================
# setup-dev command-line tool
Use the `setup-dev` tool to connect your workstation to a robot. This tool will
set up a Kubernetes context for connecting to a robot through the Kubernetes relay.
```bash
setup-dev --project my-project
```
================================================
FILE: src/go/cmd/setup-robot/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
# Library with the setup-robot entry point (main.go).
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/setup-robot",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/configutil:go_default_library",
        "//src/go/pkg/gcr:go_default_library",
        "//src/go/pkg/kubeutils:go_default_library",
        "//src/go/pkg/robotauth:go_default_library",
        "//src/go/pkg/setup:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_spf13_pflag//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/api/validation:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_client_go//dynamic:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@org_golang_google_api//option:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
    ],
)

# Unit tests for main.go (flag parsing, robot-name checks, CR creation).
go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["main_test.go"],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_client_go//dynamic/fake:go_default_library",
        "@io_k8s_client_go//testing:go_default_library",
    ],
)

# Tarball with the helper binaries and the base-robot chart that setup-robot
# expects under /setup-robot-files at runtime.
pkg_tar(
    name = "base_image_with_files_layer",
    srcs = [
        "//src/app_charts/base:base-robot",
        "//src/go/cmd/synk",
        "@kubernetes_helm//:helm",
    ],
    extension = "tar.gz",
    package_dir = "/setup-robot-files",
)

# Distroless base image plus the files layer above.
oci_image(
    name = "base_image_with_files",
    base = "@distroless_cc",
    tars = [":base_image_with_files_layer"],
)

# The setup-robot binary itself.
go_binary(
    name = "setup-robot-app",
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)

pkg_tar(
    name = "setup-robot-image-layer",
    srcs = [":setup-robot-app"],
    extension = "tar.gz",
)

# Final runnable image: binary layered on top of the files image.
oci_image(
    name = "setup-robot-image",
    base = ":base_image_with_files",
    entrypoint = ["/setup-robot-app"],
    tars = [":setup-robot-image-layer"],
    visibility = ["//visibility:public"],
)

oci_push(
    name = "setup-robot.push",
    image = ":setup-robot-image",
    # repository is required, even in oci_push
    repository = "TODO-registry",
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/setup-robot/main.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"fmt"
"log/slog"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/googlecloudrobotics/core/src/go/pkg/configutil"
"github.com/googlecloudrobotics/core/src/go/pkg/gcr"
"github.com/googlecloudrobotics/core/src/go/pkg/kubeutils"
"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
"github.com/googlecloudrobotics/core/src/go/pkg/setup"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
flag "github.com/spf13/pflag"
"golang.org/x/oauth2"
"google.golang.org/api/option"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
var (
	// robotName is filled in from the positional argument by parseFlags.
	robotName          = new(string)
	project            = flag.String("project", "", "Project ID for the Google Cloud Platform")
	robotType          = flag.String("robot-type", "", "Robot type. Optional if the robot is already registered.")
	registryID         = flag.String("registry-id", "", "The ID used when writing the public key to the cloud registry. Default: robot-<robot-name>.")
	labels             = flag.String("labels", "", "Robot labels. Optional if the robot is already registered.")
	annotations        = flag.String("annotations", "", "Robot annotations. Optional if the robot is already registered.")
	crSyncer           = flag.Bool("cr-syncer", true, "Set up the cr-syncer, and create a Robot CR in the cloud cluster.")
	fluentd            = flag.Bool("fluentd", true, "Set up fluentd to upload logs to Stackdriver.")
	fluentbit          = flag.Bool("fluentbit", false, "Set up fluentbit to upload logs to Stackdriver.")
	logPrefixSubdomain = flag.String("log-prefix-subdomain", "", "Subdomain to prepend to Fluentbit log tag prefix.")
	dockerDataRoot     = flag.String("docker-data-root", "/var/lib/docker", "This should match data-root in /etc/docker/daemon.json.")
	podCIDR            = flag.String("pod-cidr", "192.168.9.0/24",
		"The range of Pod IP addresses in the cluster. This should match the CNI "+
			"configuration (eg Cilium's clusterPoolIPv4PodCIDR). If this is incorrect, "+
			"pods will get 403 Forbidden when trying to reach the metadata server.")
	robotAuthentication = flag.Bool("robot-authentication", true, "Set up robot authentication.")
	runningOnGKE        = flag.Bool("running-on-gke", false, "If running on GKE, skip setup steps that are unnecessary and will fail.")

	// robotGVR identifies the Robot custom resource used for both local and
	// cloud clusters.
	robotGVR = schema.GroupVersionResource{
		Group:    "registry.cloudrobotics.com",
		Version:  "v1alpha1",
		Resource: "robots",
	}
)
const (
	// filesDir is where the setup-robot image ships its bundled helper
	// binaries and charts (see BUILD.bazel's base_image_with_files_layer).
	filesDir = "/setup-robot-files"
	helmPath = filesDir + "/helm"
	synkPath = filesDir + "/synk"
	// Retry counts for DNS resolution and credential publishing.
	numDNSRetries     = 6
	numServiceRetries = 6
	// commaSentinel is used when parsing labels or annotations.
	commaSentinel = "_COMMA_SENTINEL_"
	// baseNamespace is the namespace the base-robot chart and the auth
	// secret are installed into.
	baseNamespace = "default"
)
// parseFlags parses and validates the command line. It requires exactly one
// positional argument (the robot name, a valid DNS-1035 label) and the
// --project flag, defaults --registry-id to "robot-<robot-name>", and rejects
// enabling --fluentd and --fluentbit together. On any validation failure it
// prints an error and exits the process.
func parseFlags() {
	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, "Usage: setup-robot --project [OPTIONS]")
		fmt.Fprintln(os.Stderr, " robot-name")
		fmt.Fprintln(os.Stderr, " Robot name")
		fmt.Fprintln(os.Stderr, "")
		flag.PrintDefaults()
	}
	flag.Parse()

	if flag.NArg() < 1 {
		flag.Usage()
		fmt.Println("ERROR: robot-name is required.")
		os.Exit(1)
	} else if flag.NArg() > 1 {
		flag.Usage()
		fmt.Printf("ERROR: too many positional arguments (%d), expected 1.\n", flag.NArg())
		os.Exit(1)
	} else if errs := validation.NameIsDNS1035Label(flag.Arg(0), false); len(errs) > 0 {
		// The robot name doubles as the cluster name, which must be a
		// valid DNS-1035 label.
		fmt.Printf("ERROR: invalid cluster name %q: %s\n", flag.Arg(0), strings.Join(errs, ", "))
		os.Exit(1)
	}
	*robotName = flag.Arg(0)

	if *project == "" {
		flag.Usage()
		fmt.Println("ERROR: --project is required.")
		os.Exit(1)
	}
	if *registryID == "" {
		*registryID = fmt.Sprintf("robot-%s", *robotName)
	}
	if *fluentd && *fluentbit {
		flag.Usage()
		fmt.Println("ERROR: --fluentd and --fluentbit cannot be enabled at the same time.")
		os.Exit(1)
	}
}
// parseKeyValues splits a string on ',' and each entry on '=' to build a map.
// A comma escaped as "\," is treated as a literal comma inside a value.
// An empty input yields an empty map; an entry without '=' is an error.
func parseKeyValues(s string) (map[string]string, error) {
	kv := map[string]string{}
	if s == "" {
		return kv, nil
	}
	// Protect escaped commas with a sentinel before splitting, then restore
	// them in each individual entry.
	protected := strings.ReplaceAll(s, "\\,", commaSentinel)
	for _, entry := range strings.Split(protected, ",") {
		entry = strings.ReplaceAll(entry, commaSentinel, ",")
		key, value, found := strings.Cut(entry, "=")
		if !found {
			return nil, errors.New("not a key/value pair")
		}
		kv[key] = value
	}
	return kv, nil
}
// checkRobotName tests whether a Robot resource exists in the local cluster
// with a different name. It is not safe to rerun setup-robot with a different
// name as the chart-assignment-controller doesn't allow the clusterName field to change.
func checkRobotName(ctx context.Context, client dynamic.Interface) error {
	list, err := client.Resource(robotGVR).Namespace("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		// A 404 means the Robot CRD isn't installed yet: fresh cluster.
		if apierrors.IsNotFound(err) {
			return nil
		}
		return errors.Wrap(err, "list local robots")
	}
	for i := range list.Items {
		existing := list.Items[i].GetName()
		if existing == *robotName {
			continue
		}
		return fmt.Errorf(`this cluster was already set up with a different name. It is not safe to rename an existing cluster.
- either, use the old name:
setup_robot.sh %q [...]
- or, reset the cluster before renaming it:
sudo kubeadm reset
setup_robot.sh %q [...]`, existing, *robotName)
	}
	return nil
}
// main performs the full robot setup: validates environment/config, connects
// to the local cluster, publishes robot credentials, installs the base-robot
// chart via Synk, and finally creates/updates the Robot CR.
func main() {
	parseFlags()
	logHandler := ilog.NewLogHandler(slog.LevelInfo, os.Stderr)
	slog.SetDefault(slog.New(logHandler))
	ctx := context.Background()

	// The caller passes a short-lived OAuth2 access token via the
	// environment; it is used for all cloud API calls below.
	envToken := os.Getenv("ACCESS_TOKEN")
	if envToken == "" {
		slog.Error("ACCESS_TOKEN environment variable is required.")
		os.Exit(1)
	}
	// REGISTRY is the container registry passed through to the chart values.
	registry := os.Getenv("REGISTRY")
	if registry == "" {
		slog.Error("REGISTRY environment variable is required.")
		os.Exit(1)
	}
	parsedLabels, err := parseKeyValues(*labels)
	if err != nil {
		slog.Error("Invalid labels", slog.String("Labels", *labels), ilog.Err(err))
		os.Exit(1)
	}
	parsedAnnotations, err := parseKeyValues(*annotations)
	if err != nil {
		slog.Error("Invalid annotations", slog.String("Annotations", *annotations), ilog.Err(err))
		os.Exit(1)
	}

	// Wait for in-cluster DNS to become available, otherwise
	// configutil.ReadConfig() may fail.
	if err := setup.WaitForDNS("storage.googleapis.com", numDNSRetries); err != nil {
		slog.Error("Failed to resolve storage.googleapis.com. Please retry in 5 minutes.", ilog.Err(err))
		os.Exit(1)
	}

	// Set up the OAuth2 token source.
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: envToken})
	vars, err := configutil.ReadConfig(*project, option.WithTokenSource(tokenSource))
	if err != nil {
		slog.Error("Failed to read config for project", ilog.Err(err))
		os.Exit(1)
	}
	domain, ok := vars["CLOUD_ROBOTICS_DOMAIN"]
	if !ok || domain == "" {
		// Fall back to the project's default Cloud Endpoints domain.
		domain = fmt.Sprintf("www.endpoints.%s.cloud.goog", *project)
	}

	// Wait until we can resolve the project domain. This may require DNS propagation.
	if err := setup.WaitForDNS(domain, numDNSRetries); err != nil {
		slog.Error("Failed to resolve cloud cluster. Please retry in 5 minutes.", ilog.Err(err))
		os.Exit(1)
	}

	// Connect to the surrounding k8s cluster.
	localConfig, err := rest.InClusterConfig()
	if err != nil {
		slog.Error("Failed to load in-cluster config", ilog.Err(err))
		os.Exit(1)
	}
	k8sLocalClientSet, err := kubernetes.NewForConfig(localConfig)
	if err != nil {
		slog.Error("Failed to create kubernetes client set", ilog.Err(err))
		os.Exit(1)
	}
	if _, err := k8sLocalClientSet.AppsV1().Deployments("default").Get(ctx, "app-rollout-controller", metav1.GetOptions{}); err == nil {
		// It's important to avoid deploying the cloud-robotics
		// metadata-server in the same cluster as the token-vendor,
		// otherwise we'll break auth for all robot clusters.
		slog.Error("The local context contains a app-rollout-controller deployment. It is not safe to run robot setup on a GKE cloud cluster.")
		os.Exit(1)
	}
	k8sLocalDynamic, err := dynamic.NewForConfig(localConfig)
	if err != nil {
		slog.Error("Failed to create dynamic client set", ilog.Err(err))
		os.Exit(1)
	}
	// Renaming an existing cluster is unsafe; bail out if a Robot CR with a
	// different name already exists locally (see checkRobotName).
	if err := checkRobotName(ctx, k8sLocalDynamic); err != nil {
		slog.Error("RobotName", ilog.Err(err))
		os.Exit(1)
	}

	if *robotAuthentication {
		// Set up robot authentication.
		auth := &robotauth.RobotAuth{
			RobotName:           *robotName,
			ProjectId:           *project,
			Domain:              domain,
			PublicKeyRegistryId: *registryID,
		}
		slog.Info("Creating new private key")
		if err := auth.CreatePrivateKey(); err != nil {
			slog.Error("Failed creating key", ilog.Err(err))
			os.Exit(1)
		}
		httpClient := oauth2.NewClient(ctx, tokenSource)
		if err := setup.PublishCredentialsToCloud(httpClient, auth, numServiceRetries); err != nil {
			slog.Error("Failed to publish credentials.", ilog.Err(err))
			os.Exit(1)
		}
		if err := auth.StoreInK8sSecret(ctx, k8sLocalClientSet, baseNamespace); err != nil {
			slog.Error("Failed to write auth secret", ilog.Err(err))
			os.Exit(1)
		}
		if err := gcr.UpdateGcrCredentials(ctx, k8sLocalClientSet, auth); err != nil {
			slog.Error("Failed to update credentials", ilog.Err(err))
			os.Exit(1)
		}
	}

	slog.Info("Initializing Synk")
	output, err := exec.Command(synkPath, "init").CombinedOutput()
	if err != nil {
		slog.Error("Synk init failed.", ilog.Err(err), slog.String("Output", string(output)))
		os.Exit(1)
	}

	appManagement := configutil.GetBoolean(vars, "APP_MANAGEMENT", true)
	// Use "robot" as a suffix for consistency for Synk deployments.
	installChartOrDie(ctx, k8sLocalClientSet, domain, registry, "base-robot", baseNamespace,
		"base-robot-0.0.1.tgz", appManagement)

	// Set up Robot CR as a last step (local CR needs CRD to be deployed)
	if *crSyncer {
		// Set up client for cloud k8s cluster, to create/update the Robot CR there.
		k8sCloudCfg := kubeutils.BuildCloudKubernetesConfig(tokenSource, domain)
		k8sCloudDynamic, err := dynamic.NewForConfig(k8sCloudCfg)
		if err != nil {
			slog.Error("Failed to create k8s client", ilog.Err(err))
			os.Exit(1)
		}
		if err := createOrUpdateRobot(ctx, k8sCloudDynamic, parsedLabels, parsedAnnotations); err != nil {
			slog.Error("Failed to create/update cloud robot CR", slog.String("Name", *robotName), ilog.Err(err))
			os.Exit(1)
		}
	} else {
		// Creating a Robot CR in the cloud would make the app-rollout-controller
		// create ChartAssignments in the cloud, but if the cr-syncer is disabled,
		// these would not be synced/installed.
		// Hence we create a local Robot CR to keep the same interface.
		if err := createOrUpdateRobot(ctx, k8sLocalDynamic, parsedLabels, parsedAnnotations); err != nil {
			slog.Error("Failed to create/update local robot CR", slog.String("Name", *robotName), ilog.Err(err))
			os.Exit(1)
		}
	}
	slog.Info("Setup complete")
}
// helmValuesStringFromMap renders a map as a comma-separated "key=value" list
// suitable for Helm's --set-string flag. Ordering follows Go's map iteration
// order and is therefore unspecified.
func helmValuesStringFromMap(varMap map[string]string) string {
	pairs := make([]string, 0, len(varMap))
	for key, value := range varMap {
		pairs = append(pairs, key+"="+value)
	}
	return strings.Join(pairs, ",")
}
// installChartOrDie installs a chart using Synk: it ensures the target
// namespace exists, templates the chart with the bundled Helm binary, pipes
// the rendered manifests into `synk apply`, and exits the process on any
// failure.
func installChartOrDie(ctx context.Context, cs *kubernetes.Clientset, domain, registry, name, namespace, chartPath string, appManagement bool) {
	// ensure namespace for chart exists
	if _, err := cs.CoreV1().Namespaces().Create(ctx,
		&corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: namespace,
			},
		},
		metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		slog.Error("Failed to create namespace.", slog.String("Namespace", namespace), ilog.Err(err))
		os.Exit(1)
	}
	// Chart values, passed to Helm via --set-string.
	vars := helmValuesStringFromMap(map[string]string{
		"domain":               domain,
		"registry":             registry,
		"project":              *project,
		"app_management":       strconv.FormatBool(appManagement),
		"cr_syncer":            strconv.FormatBool(*crSyncer),
		"fluentd":              strconv.FormatBool(*fluentd),
		"fluentbit":            strconv.FormatBool(*fluentbit),
		"log_prefix_subdomain": *logPrefixSubdomain,
		"docker_data_root":     *dockerDataRoot,
		"pod_cidr":             *podCIDR,
		"robot_authentication": strconv.FormatBool(*robotAuthentication),
		"running_on_gke":       strconv.FormatBool(*runningOnGKE),
		"robot.name":           *robotName,
	})
	slog.Info("Installing chart using Synk",
		slog.String("Chart", name),
		slog.String("Path", chartPath))
	// Render the chart to plain manifests with the bundled Helm binary.
	output, err := exec.Command(
		helmPath,
		"template",
		"--set-string", vars,
		"--name", name,
		"--namespace", namespace,
		filepath.Join(filesDir, chartPath),
	).CombinedOutput()
	if err != nil {
		slog.Error("Synk install failed.",
			slog.String("Chart", name),
			ilog.Err(err),
			slog.String("Helm output", string(output)))
		os.Exit(1)
	}
	// Apply the rendered manifests from stdin ("-f -").
	cmd := exec.Command(
		synkPath,
		"apply",
		name,
		"-n", namespace,
		"-f", "-",
	)
	// Helm writes the templated manifests and errors alike to stderr.
	// So we can just take the combined output as is.
	cmd.Stdin = bytes.NewReader(output)
	if output, err = cmd.CombinedOutput(); err != nil {
		slog.Error("Synk install failed.",
			slog.String("Chart", name),
			ilog.Err(err),
			slog.String("Synk output", string(output)))
		os.Exit(1)
	}
}
// createOrUpdateRobot creates or updates the Robot CR named *robotName in the
// "default" namespace of the given cluster. It stamps the robot-name label on
// the supplied label map (mutating it), and fills the master-host annotation
// from HOST_HOSTNAME when the caller did not provide one, plus the
// crc-version annotation from CRC_VERSION when set.
func createOrUpdateRobot(ctx context.Context, k8sDynamicClient dynamic.Interface, labels map[string]string, annotations map[string]string) error {
	const masterHost = "cloudrobotics.com/master-host"
	labels["cloudrobotics.com/robot-name"] = *robotName
	if hostHostname := os.Getenv("HOST_HOSTNAME"); hostHostname != "" && annotations[masterHost] == "" {
		annotations[masterHost] = hostHostname
	}
	if crcVersion := os.Getenv("CRC_VERSION"); crcVersion != "" {
		annotations["cloudrobotics.com/crc-version"] = crcVersion
	}
	robotClient := k8sDynamicClient.Resource(robotGVR).Namespace("default")
	return setup.CreateOrUpdateRobot(ctx, robotClient, *robotName, *robotType, *project, labels, annotations)
}
================================================
FILE: src/go/cmd/setup-robot/main_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"net/http"
"os"
"reflect"
"testing"
registry "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
dynfake "k8s.io/client-go/dynamic/fake"
k8stest "k8s.io/client-go/testing"
)
// An empty input string must parse without error (yielding an empty map).
func TestParseKeyValues_ReturnsEmptyMapOnEmptyInput(t *testing.T) {
	if _, err := parseKeyValues(""); err != nil {
		t.Errorf("Empty should be okay, but returned %v", err)
	}
}
// A single "key=value" entry must produce exactly that mapping.
func TestParseKeyValues_HandlesSingleEntry(t *testing.T) {
	got, err := parseKeyValues("foo=bar")
	if err != nil {
		t.Errorf("Failed to parse single entry, but returned %v", err)
	}
	value, present := got["foo"]
	if !present {
		t.Errorf("No 'foo' entry created")
	}
	if value != "bar" {
		t.Errorf("labels['foo'] should be 'bar', but is %q", value)
	}
}
// Comma-separated entries must each be parsed into the map.
func TestParseKeyValues_HandlesMultipleEntries(t *testing.T) {
	got, err := parseKeyValues("foo=bar,zoo=zar")
	if err != nil {
		t.Errorf("Failed to parse single entry, but returned %v", err)
	}
	value, present := got["foo"]
	if !present {
		t.Errorf("No 'foo' entry created")
	}
	if value != "bar" {
		t.Errorf("labels['foo'] should be 'bar', but is %q", value)
	}
	value, present = got["zoo"]
	if !present {
		t.Errorf("No 'zoo' entry created")
	}
	if value != "zar" {
		t.Errorf("labels['zoo'] should be 'zar', but is %q", value)
	}
}
// A comma escaped as "\," inside a value must survive parsing as a literal comma.
func TestParseKeyValues_HandlesEscapedCommas(t *testing.T) {
	got, err := parseKeyValues("foo=bar\\,baz,zoo=zar")
	if err != nil {
		t.Errorf("Failed to parse single entry, but returned %v", err)
	}
	value, present := got["foo"]
	if !present {
		t.Errorf("No 'foo' entry created")
	}
	if value != "bar,baz" {
		t.Errorf("labels['foo'] should be 'bar,baz', but is %q", value)
	}
	value, present = got["zoo"]
	if !present {
		t.Errorf("No 'zoo' entry created")
	}
	if value != "zar" {
		t.Errorf("labels['zoo'] should be 'zar', but is %q", value)
	}
}
// Spaces inside a value are ordinary characters and must be preserved.
func TestParseKeyValues_HandlesSpaces(t *testing.T) {
	got, err := parseKeyValues("foo=bar baz")
	if err != nil {
		t.Errorf("Failed to parse single entry, but returned %v", err)
	}
	value, present := got["foo"]
	if !present {
		t.Errorf("No 'foo' entry created")
	}
	if value != "bar baz" {
		t.Errorf("labels['foo'] should be 'bar baz', but is %q", value)
	}
}
// TestCheckRobotName_SucceedsWhenCRDNotFound verifies that a 404 from the
// list call (Robot CRD not yet installed) is treated as "no conflicting
// robot" rather than an error.
func TestCheckRobotName_SucceedsWhenCRDNotFound(t *testing.T) {
	ctx := context.Background()
	sc := runtime.NewScheme()
	*robotName = "robot_name"
	c := dynfake.NewSimpleDynamicClientWithCustomListKinds(sc,
		map[schema.GroupVersionResource]string{
			robotGVR: "RobotList",
		},
	)
	// In a fresh cluster, the Robot CRD doesn't exist, so GET robots
	// returns a 404.
	c.PrependReactor("list", "robots", func(k8stest.Action) (bool, runtime.Object, error) {
		// Use a keyed field so `go vet` doesn't flag the composite literal.
		return true, nil, &k8serrors.StatusError{ErrStatus: metav1.Status{
			Status:  metav1.StatusFailure,
			Code:    http.StatusNotFound,
			Reason:  metav1.StatusReasonNotFound,
			Message: "the server could not find the requested resource",
		}}
	})
	err := checkRobotName(ctx, c)
	if err != nil {
		t.Errorf("checkRobotName() failed unexpectedly: %v", err)
	}
}
// TestCheckRobotName exercises checkRobotName against clusters that are
// empty, contain a Robot with the same name, or contain a Robot with a
// different name (only the last case must fail).
func TestCheckRobotName(t *testing.T) {
	ctx := context.Background()
	sc := runtime.NewScheme()
	registry.AddToScheme(sc)
	*robotName = "robot_name"
	tests := []struct {
		desc      string
		robots    []runtime.Object // preexisting Robot CRs in the fake cluster
		wantError bool
	}{
		{
			desc:      "empty cluster",
			robots:    []runtime.Object{},
			wantError: false,
		},
		{
			desc: "robot with same name",
			robots: []runtime.Object{
				&unstructured.Unstructured{
					Object: map[string]interface{}{
						"apiVersion": "registry.cloudrobotics.com/v1alpha1",
						"kind":       "Robot",
						"metadata": map[string]interface{}{
							"name":      *robotName,
							"namespace": "default",
						},
					},
				},
			},
			wantError: false,
		},
		{
			desc: "robot with other name",
			robots: []runtime.Object{
				&unstructured.Unstructured{
					Object: map[string]interface{}{
						"apiVersion": "registry.cloudrobotics.com/v1alpha1",
						"kind":       "Robot",
						"metadata": map[string]interface{}{
							"name":      "other_name",
							"namespace": "default",
						},
					},
				},
			},
			wantError: true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			c := dynfake.NewSimpleDynamicClient(sc, tc.robots...)
			err := checkRobotName(ctx, c)
			if tc.wantError && err == nil {
				t.Errorf("checkRobotName() succeeded unexpectedly")
			}
			if !tc.wantError && err != nil {
				t.Errorf("checkRobotName() failed unexpectedly: %v", err)
			}
		})
	}
}
func TestCreateOrUpdateRobot_Succeeds(t *testing.T) {
ctx := context.Background()
hostname, err := os.Hostname()
if err != nil {
t.Fatal("Could not determine hostname")
}
os.Setenv("HOST_HOSTNAME", hostname)
sc := runtime.NewScheme()
registry.AddToScheme(sc)
*robotName = "robot_name"
tests := []struct {
desc string
labels map[string]string
annotations map[string]string
robot *registry.Robot
wantLabels map[string]string
wantAnnotations map[string]string
}{
{
desc: "other robot",
labels: map[string]string{},
annotations: map[string]string{},
robot: ®istry.Robot{
ObjectMeta: metav1.ObjectMeta{
Name: "other_robot",
Namespace: "default",
},
},
wantLabels: map[string]string{
"cloudrobotics.com/robot-name": "robot_name",
},
wantAnnotations: map[string]string{
"cloudrobotics.com/master-host": hostname,
},
},
{
desc: "robot without label",
labels: map[string]string{},
annotations: map[string]string{},
robot: ®istry.Robot{
ObjectMeta: metav1.ObjectMeta{
Name: *robotName,
Namespace: "default",
},
},
wantLabels: map[string]string{
"cloudrobotics.com/robot-name": "robot_name",
},
wantAnnotations: map[string]string{
"cloudrobotics.com/master-host": hostname,
},
},
{
desc: "robot with other label",
labels: map[string]string{},
annotations: map[string]string{},
robot: ®istry.Robot{
ObjectMeta: metav1.ObjectMeta{
Name: *robotName,
Namespace: "default",
Labels: map[string]string{"cloudrobotics.com/ssh-port": "22"},
},
},
wantLabels: map[string]string{
"cloudrobotics.com/robot-name": "robot_name",
"cloudrobotics.com/ssh-port": "22",
},
wantAnnotations: map[string]string{
"cloudrobotics.com/master-host": hostname,
},
},
{
desc: "robot with same hostname",
labels: map[string]string{},
annotations: map[string]string{},
robot: ®istry.Robot{
ObjectMeta: metav1.ObjectMeta{
Name: *robotName,
Namespace: "default",
Annotations: map[string]string{"cloudrobotics.com/master-host": hostname},
},
},
wantLabels: map[string]string{
"cloudrobotics.com/robot-name": "robot_name",
},
wantAnnotations: map[string]string{
"cloudrobotics.com/master-host": hostname,
},
},
{
desc: "robot with different hostname",
labels: map[string]string{},
annotations: map[string]string{},
robot: ®istry.Robot{
ObjectMeta: metav1.ObjectMeta{
Name: *robotName,
Namespace: "default",
Annotations: map[string]string{"cloudrobotics.com/master-host": "other-host"},
},
},
wantLabels: map[string]string{
"cloudrobotics.com/robot-name": "robot_name",
},
wantAnnotations: map[string]string{
"cloudrobotics.com/master-host": hostname,
},
},
{
desc: "master-host given as input",
labels: map[string]string{},
annotations: map[string]string{
"cloudrobotics.com/master-host": "correct-host",
},
robot: ®istry.Robot{
ObjectMeta: metav1.ObjectMeta{
Name: *robotName,
Namespace: "default",
Annotations: map[string]string{"cloudrobotics.com/master-host": "other-host"},
},
},
wantLabels: map[string]string{
"cloudrobotics.com/robot-name": "robot_name",
},
wantAnnotations: map[string]string{
"cloudrobotics.com/master-host": "correct-host",
},
},
}
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
c := dynfake.NewSimpleDynamicClient(sc, tc.robot)
if err := createOrUpdateRobot(ctx, c, tc.labels, tc.annotations); err != nil {
t.Fatalf("createOrUpdateRobot() failed unexpectedly: %v", err)
}
robotClient := c.Resource(robotGVR).Namespace("default")
robot, err := robotClient.Get(ctx, *robotName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting robot: %v", err)
}
got, ok, err := unstructured.NestedStringMap(robot.Object, "metadata", "labels")
if err != nil {
t.Fatalf("failed parsing robot labels: %v", err)
}
if !ok {
t.Fatalf("robot %q is missing the label map", *robotName)
}
if !reflect.DeepEqual(got, tc.wantLabels) {
t.Errorf("labels:\n%q\nwant:\n%q", got, tc.wantLabels)
}
got, ok, err = unstructured.NestedStringMap(robot.Object, "metadata", "annotations")
if err != nil {
t.Fatalf("failed parsing robot labels: %v", err)
}
if !ok {
t.Fatalf("robot %q is missing the annotation map", *robotName)
}
if !reflect.DeepEqual(got, tc.wantAnnotations) {
t.Errorf("annotations:\n%q\nwant:\n%q", got, tc.wantAnnotations)
}
})
}
}
================================================
FILE: src/go/cmd/synk/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
# Library with the synk CLI implementation (synk.go).
go_library(
    name = "go_default_library",
    srcs = ["synk.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/synk",
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/synk:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_spf13_cobra//:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_cli_runtime//pkg/genericclioptions:go_default_library",
        "@io_k8s_cli_runtime//pkg/resource:go_default_library",
        "@io_k8s_client_go//dynamic:go_default_library",
        "@io_k8s_client_go//plugin/pkg/client/auth:go_default_library",
    ],
)

# The synk binary; built with pure Go (no cgo) so it can run in minimal images.
go_binary(
    name = "synk",
    embed = [":go_default_library"],
    pure = "on",
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/synk/README.md
================================================
# synk
synk is a tool to sync manifests with a cluster.
It takes a set of fully populated Kubernetes resources (files or in-process objects) and applies them to a cluster as a named collection. A custom resource is used to store which manifests are part of the set, to reliably clean up resources that are no longer part of it. Custom Resource Definitions (CRDs) are properly initialized before any other resources are installed, and resource dependencies are resolved through retry logic.
It has a similar intent to [Mortar](https://github.com/kontena/mortar), but synk is usable as a Golang library and has first-class support for CRDs.
To be compatible with some of the existing charts, we allow charts to install resources to "kube-system" as the only allowed namespace outside of the chart namespace.
## Examples
```
# Apply my-chart.
helm template my-chart.tgz ... | synk apply my-chart -n default -f -
# Remove my-chart.
synk delete my-chart.v1 -n default
```
## Behavior
synk's `apply` command works as follows:
1. Resources are parsed from the input. Any namespaced resources that don't
specify a namespace already are updated with the value of the `--namespace`
(`-n`) flag.
1. Any resources that specify a namespace other than `kube-system` or the given
namespace will cause synk to fail.
1. synk creates a new `ResourceSet`, listing the resources that are to be
applied. If reapplying a previously applied set, it creates a new
ResourceSet with an incremented version number (eg `my-chart.v2`).
1. The resources are split into two groups: CRDs and non-CRDs ("regular
resources").
1. All CRDs are applied to the cluster. synk then waits for these to become
available.
1. Next, regular resources are applied to the cluster.
- Updates: most resources are updated with PATCH requests, which reduces the
risk of resource version conflicts. Resources larger than 256kB, which can't
use an annotation to store the last-applied-configuration, are updated with
POST requests. Resources that can't be updated (eg Jobs, PersistentVolumes)
are deleted and recreated according to the `canReplace()` heuristic.
- Ownership: all resources specify the ResourceSet as via ownerReferences.
This means that the Kubernetes garbage collector will delete the resources
when the ResourceSet is deleted. Note that CRDs don't have ownerReferences,
as this presents a data loss risk: if the garbage collector deleted a CRD it
would also delete all corresponding CRs.
- Retries: if a transient error is encountered when applying any regular
resource, synk retries the failed resources until the number of failed
resources is stable. This retry loop exists to handle constraints of the
resource creation order: for example, a Pod must be created after the
ServiceAccount that it uses. This isn't relevant when using Deployments
instead of bare Pods, so in practice this retry loop is not essential,
although it could be useful when using CRDs and validation webhooks.
1. The ResourceSet status is updated, describing for each resource whether it
was successfully applied or not.
1. If all resources were successfully applied, any previous ResourceSets with
the same name are deleted. This means that if a resource is removed from a
chart, the Kubernetes garbage collector will delete it after synk
successfully reapplies the newer version of the chart.
1. Finally, if this process failed due to a transient error (according to the
IsTransientErr() heuristic) and `--retries=0` hasn't been specified, `apply`
will be retried completely, including creation of a new ResourceSet. This
handles transient errors that take seconds or minutes to pass, such as
apiserver downtime.
================================================
FILE: src/go/cmd/synk/synk.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"log/slog"
"os"
"strings"
"time"
"github.com/cenkalti/backoff"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/synk"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
const (
retryBackoff = 5 * time.Second
)
var (
maxQPS int
retries uint64
cmdRoot = &cobra.Command{
Use: "synk",
Short: "A tool to sync manifests with a cluster.",
}
cmdInit = &cobra.Command{
Use: "init",
Short: "Initialize cluster for use with synk.",
Run: runInit,
}
cmdApply = &cobra.Command{
Use: "apply",
Short: "Apply manifests to the cluster.",
Run: runApply,
}
cmdDelete = &cobra.Command{
Use: "delete",
Short: "Delete all ResourceSets for the name.",
Run: runDelete,
}
restOpts = genericclioptions.NewConfigFlags(true)
resourceOpts = genericclioptions.NewResourceBuilderFlags()
)
// main configures logging and command-line flags, then hands control to
// cobra, which dispatches to one of the init/apply/delete subcommands.
// A failed command is reported on stderr and exits with status 1.
func main() {
	slog.SetDefault(slog.New(ilog.NewLogHandler(slog.LevelInfo, os.Stderr)))

	restOpts.AddFlags(cmdRoot.PersistentFlags())
	resourceOpts.AddFlags(cmdApply.PersistentFlags())
	cmdRoot.PersistentFlags().IntVar(&maxQPS, "max-qps", 50, "max number of calls to the apiserver per second")
	cmdApply.PersistentFlags().Uint64Var(&retries, "retries", 60, "max number of retries for transient errors, with a 5 second constant backoff")

	for _, sub := range []*cobra.Command{cmdInit, cmdApply, cmdDelete} {
		cmdRoot.AddCommand(sub)
	}
	if err := cmdRoot.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// newSynk constructs a synk client from the configured REST flags. The
// --max-qps flag is applied as a client-side rate limit (with a 2x burst)
// on calls to the apiserver.
func newSynk() (*synk.Synk, error) {
	cfg, err := restOpts.ToRESTConfig()
	if err != nil {
		return nil, errors.Wrap(err, "get config")
	}
	cfg.QPS = float32(maxQPS)
	cfg.Burst = maxQPS * 2

	disc, err := restOpts.ToDiscoveryClient()
	if err != nil {
		return nil, errors.Wrap(err, "get discovery client")
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return nil, errors.Wrap(err, "create dynamic client")
	}
	client := synk.New(dyn, disc)
	// Invalidate to be safe. It seems that a persistent discovery cache
	// likes to stay out of sync way too often.
	disc.Invalidate()
	return client, nil
}
// runInit implements the `init` subcommand: it initializes the cluster for
// use with synk. Any error is printed to stderr and exits with status 1.
func runInit(cmd *cobra.Command, args []string) {
	fail := func(err error) {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	s, err := newSynk()
	if err != nil {
		fail(err)
	}
	if err := s.Init(); err != nil {
		fail(err)
	}
	fmt.Fprintln(os.Stderr, "Initialized successfully")
}
// runDelete implements the `delete` subcommand: it deletes all ResourceSets
// for the single name given as positional argument. Usage errors exit with
// status 2, runtime errors with status 1.
func runDelete(cmd *cobra.Command, args []string) {
	fail := func(msg string, code int) {
		fmt.Fprintln(os.Stderr, msg)
		os.Exit(code)
	}
	if len(args) != 1 {
		fail("unrecognized number of arguments, exactly one (name) expected", 2)
	}
	s, err := newSynk()
	if err != nil {
		fail(err.Error(), 1)
	}
	if err := s.Delete(context.Background(), args[0]); err != nil {
		fail(err.Error(), 1)
	}
	fmt.Fprintln(os.Stderr, "Deleted successfully")
}
// runApply implements the `apply` subcommand wrapper: it validates the
// argument count (exactly one ResourceSet name) and delegates to apply().
// Usage errors exit with status 2, apply failures with status 1.
func runApply(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		fmt.Fprintln(os.Stderr, "unrecognized number of arguments, exactly one (name) expected")
		os.Exit(2)
	}
	err := apply(args[0])
	if err == nil {
		return
	}
	fmt.Fprintln(os.Stderr, err.Error())
	os.Exit(1)
}
// apply reads the manifests selected by the --filename flags, bundles them
// into a ResourceSet called `name`, and applies them to the cluster. If this
// fails with a transient error (per synk.IsTransientErr), the whole apply —
// including creation of a new ResourceSet — is retried with a constant
// backoff of retryBackoff, up to --retries times.
func apply(name string) error {
	// If a target namespace for the chart is given, enforce it.
	namespace, enforceNamespace, err := restOpts.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return err
	}
	// Parse the manifest files locally (Local() avoids contacting the
	// apiserver at this stage) into flattened resource infos.
	filenameOpts := resourceOpts.FileNameFlags.ToOptions()
	result := resource.NewBuilder(restOpts).
		ContinueOnError().
		Unstructured(). // Must be at the top.
		Local().
		FilenameParam(false, &filenameOpts).
		Flatten().
		Do()
	if result.Err() != nil {
		return errors.Wrap(result.Err(), "get files")
	}
	infos, err := result.Infos()
	if err != nil {
		return errors.Wrap(err, "get file information")
	}
	var resources []*unstructured.Unstructured
	for _, i := range infos {
		resources = append(resources, i.Object.(*unstructured.Unstructured))
	}
	s, err := newSynk()
	if err != nil {
		return err
	}
	opts := &synk.ApplyOptions{
		Namespace:        namespace,
		EnforceNamespace: enforceNamespace,
		Log:              logAction,
	}
	// Retry only transient errors; anything else is wrapped as Permanent so
	// the backoff loop aborts immediately.
	if err := backoff.Retry(
		func() error {
			_, err := s.Apply(context.Background(), name, opts, resources...)
			if err != nil {
				if synk.IsTransientErr(err) {
					return err
				}
				return backoff.Permanent(err)
			}
			return nil
		},
		backoff.WithMaxRetries(backoff.NewConstantBackOff(retryBackoff), retries),
	); err != nil {
		return errors.Wrap(err, "apply files")
	}
	return nil
}
// logAction prints one progress line per resource to stderr. To reduce
// visual clutter, the detail message is only appended for non-success
// statuses.
func logAction(r *unstructured.Unstructured, action apps.ResourceAction, status, msg string) {
	line := fmt.Sprintf("[%s] %s %s/%s %s/%s",
		strings.ToUpper(status), action,
		r.GetAPIVersion(), r.GetKind(),
		r.GetNamespace(), r.GetName())
	if status != synk.StatusSuccess {
		line += ": " + msg
	}
	fmt.Fprintln(os.Stderr, line)
}
================================================
FILE: src/go/cmd/token-vendor/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
go_library(
name = "go_default_library",
srcs = ["main.go"],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor",
visibility = ["//visibility:private"],
deps = [
"//src/go/cmd/token-vendor/api:go_default_library",
"//src/go/cmd/token-vendor/api/v1:go_default_library",
"//src/go/cmd/token-vendor/app:go_default_library",
"//src/go/cmd/token-vendor/oauth:go_default_library",
"//src/go/cmd/token-vendor/repository:go_default_library",
"//src/go/cmd/token-vendor/repository/k8s:go_default_library",
"//src/go/cmd/token-vendor/repository/memory:go_default_library",
"//src/go/cmd/token-vendor/tokensource:go_default_library",
"@com_github_googlecloudrobotics_ilog//:go_default_library",
"@io_k8s_client_go//kubernetes:go_default_library",
"@io_k8s_client_go//plugin/pkg/client/auth:go_default_library",
"@io_k8s_client_go//rest:go_default_library",
],
)
go_binary(
name = "token-vendor-app",
embed = [":go_default_library"],
)
pkg_tar(
name = "token-vendor-image-layer",
srcs = [":token-vendor-app"],
extension = "tar.gz",
)
oci_image(
name = "token-vendor-image",
base = "@distroless_base",
entrypoint = ["/token-vendor-app"],
tars = [":token-vendor-image-layer"],
)
================================================
FILE: src/go/cmd/token-vendor/README.md
================================================
# Token Vendor
The token vendor provides authentication for requests from the robots to our cloud environment.
The robot's identity is generated during setup via a public-private key pair.
The token vendor provides APIs for registering robots through their public key and OAuth2 workflows for authenticating the signed requests from robots to cloud resources, for example to write logs to GCP Logging.
The token vendor itself is stateless and all data is stored in GCP.
The following workflows are covered by the token vendor:
* Register a robot by its public key and a unique device identifier. The public key is stored in a cloud backend.
* Retrieve a robot's public key through the device identifier
* Generate a scoped and time-limited IAM access token for access to GCP resources
* Validate a given IAM access token
## Public Key Backends
The token vendor supports multiple backends for storage of public keys for registered devices.
### Kubernetes Configmaps
The Kubernetes backend uses configmaps to store and lookup public keys.
The configmaps are stored in a configured namespace with the device identifier as name.
The public key is stored under a key in the configmap.
Devices can be removed by deleting the configmap.
### In-Memory
Stores public keys in-memory for testing.
Example:
```
# Run with memory backend
bazel run //src/go/cmd/token-vendor -- -verbose --project testproject --accepted_audience test --key-store IN_MEMORY
# Store test key
curl --data-binary "@api/v1/testdata/rsa_cert.pem" -H "Content-type: application/x-pem-file" -D - http://127.0.0.1:9090/apis/core.token-vendor/v1/public-key.publish?device-id=robot-dev-testuser
# Retrieve key
curl -D - http://127.0.0.1:9090/apis/core.token-vendor/v1/public-key.read?device-id=robot-dev-testuser
```
## API
### /public-key.publish: Robot registration
New robots get registered by a human administrator (authorized by an access
token on the request). The method adds the provided public key to the configured
key store. Write access to the public key registry needs to be restricted to
prevent e.g. robots from registering other robots.
* URL: /apis/core.token-vendor/v1/public-key.publish
* Method: POST
* URL Params:
* device-id: unique device name (by default robot-)
* Body: application/x-pem-file
* Response: only http status code
### /public-key.configure: Customize robot registration
Configure optional properties of the on-prem robot registration. This call needs
to be authorized by an access token for a human administrator.
* URL: /apis/core.token-vendor/v1/public-key.configure
* Method: POST
* URL Params:
* device-id: unique device name (by default robot-)
* Body: json {
service-account: str, defaults to robot-service@.iam.gserviceaccount.com"
service-account-delegate: str, optional intermediate delegate
}
* Response: only http status code
### /public-key.read: Public key retrieval
To verify messages sent by a robot one can fetch the public key from the
keystore using this method.
* URL: /apis/core.token-vendor/v1/public-key.read
* Method: GET
* URL Params:
* device-id: unique device name (by default robot-)
* Response: application/x-pem-file
### /token.oauth2: OAuth2 access token requests by robots
Robots sign JWTs with their local private keys. These get verified against the
public keys from the keystore. If the key is present and enabled, the token
vendor will hand out an OAuth access token for requested service account.
The service account must be either the default one (robot-service@) or the
account configured during registration (see /public-key.configure). To specify
custom service account use Subject claim.
* URL: /apis/core.token-vendor/v1/token.oauth2
* Method: POST
* Body: JWT query (TokenSource)
* Response: application/json
### /token.verify: AuthN/Z verification
Browsers or robots can query endpoints like the ws-proxy with authorization
headers or a `?token=` query parameter. They are already authenticated, and the
token vendor just checks that IAM authorizes the request.
* URL: /apis/core.token-vendor/v1/token.verify
* Method: GET
* URL Params:
* robots: boolean to indicate if robot-service account tokens are allowed
* Response: only http status code
Results are backed by a cache with a 5 minute lifetime to ease the load on the
IAM backend.
### /jwt.verify: AuthZ verification
Robots sign JWTs with their local private keys. These get verified against the
public keys from the keystore. If the key is present and enabled, the token
vendor will return status code 200.
This endpoint allows 3rd parties to do a check against the token-vendor before
the client reached the token vendor to retrieve an OAuth token.
It only validates whether the robot is known to the token vendor, there is no
further authentication or authorization done with this endpoint.
* URL: /apis/core.token-vendor/v1/jwt.verify
* Method: GET
* Headers:
* Authorization: JWT that allows authorization
* Response: only http status code
## Interactive AuthN & AuthZ (with oauth2-proxy)
We use the token vendor together with oauth2-proxy as an authentication and
authorization helper for nginx. This is essentially a poor man's IAP, used
because the GCE Ingress controller does not support IAP annotations on the GCE
objects it creates. Ingresses can be protected by it with an auth-url
annotation:
```
nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.default.svc.cluster.local/apis/core.token-vendor/v1/token.verify"
nginx.ingress.kubernetes.io/auth-signin: "https://{{ .Values.domain }}/oauth2/start?rd=$escaped_request_uri"
```
nginx is set up to use the oauth2-proxy as an authentication proxy, and the
oauth2-proxy has its upstream set to the token vendor.
The request for unauthenticated users flows like this:
1. nginx-ingress receives the request.
1. nginx-ingress queries the auth-url.
1. oauth2-proxy sees that there's no cookie attached, redirects the user to the
Google sign-in page.
1. When the user is signed in, oauth2-proxy's callback page sets the encrypted
auth cookie and redirects the user back to the original URL.
1. nginx-ingress again queries the auth-url.
1. oauth2-proxy sees the auth cookie, decrypts it and forwards the access token
to the token vendor.
1. The token vendor sees the access token and checks the authorization with
IAM.
1. When authorization is fine, token vendor returns 200.
1. nginx-ingress forwards the request to the backend.
## curl API Example Workflow
First set the name of your project:
```bash
PROJECT=testproject
```
Publish a key for the device `robot-dev-testuser`:
```bash
curl -D - --max-time 3 --data-binary "@api/v1/testdata/rsa_cert.pem" -H "Authorization: Bearer $(gcloud auth print-access-token)" -H "Content-type: application/x-pem-file" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/public-key.publish?device-id=robot-dev-testuser
```
Optionally set extra options for the device:
```bash
curl -D - --max-time 3 -d '{"service-account":"svc@${PROJECT}.iam.gserviceaccount.com"}' -H "Content-Type: application/json" -H "Authorization: Bearer $(gcloud auth print-access-token)" -H "Content-type: application/x-pem-file" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/public-key.configure?device-id=robot-dev-testuser
```
Read the key again:
```bash
curl -D - --max-time 3 -H "Authorization: Bearer $(gcloud auth print-access-token)" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/public-key.read?device-id=robot-dev-testuser
```
Verify if your local user account has access to the human and robot ACL:
```bash
curl -D - --max-time 3 -H "Authorization: Bearer $(gcloud auth print-access-token)" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.verify
```
and
```bash
curl -D - --max-time 3 -H "Authorization: Bearer $(gcloud auth print-access-token)" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.verify?robots=true
```
Request a cloud access token for the robot. First generate a valid JWT using the instructions at [testdata/README.md](api/v1/testdata/README.md). Afterwards use it to request the cloud token:
```bash
JWT=$(cat api/v1/testdata/jwt.bin)
curl -D - --max-time 3 --data-binary "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=${JWT}" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.oauth2
```
You can capture the token in `$TOKEN` with:
```bash
TOKEN=$(curl -s --max-time 3 --data-binary "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=${JWT}" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.oauth2 | jq -r .access_token)
```
Verify if the token has access to the robots ACL (it should respond 200):
```bash
curl -D - --max-time 3 -H "Authorization: Bearer ${TOKEN}" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.verify?robots=true
```
Verify if the token does *not* have access to the human ACL (it should respond 403):
```bash
curl -D - --max-time 3 -H "Authorization: Bearer ${TOKEN}" https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.verify
```
================================================
FILE: src/go/cmd/token-vendor/api/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["api.go"],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/api",
visibility = ["//visibility:public"],
deps = ["@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library"],
)
================================================
FILE: src/go/cmd/token-vendor/api/api.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"log/slog"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
httpTimeoutRead = 10 * time.Second
httpTimeoutWrite = 10 * time.Second
httpTimeoutHandler = 10 * time.Second
)
type constHandler []byte
func (ch constHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Metadata-Flavor", "Google")
w.Header().Set("Content-Type", "application/text")
w.WriteHeader(http.StatusOK)
w.Write(ch)
}
// Register installs the generic API handlers (health check and Prometheus
// metrics) on http.DefaultServeMux. It currently never fails; the error
// return exists for interface symmetry with other setup steps.
func Register() error {
	handlers := map[string]http.Handler{
		"/healthz": constHandler("ok"),
		"/metrics": promhttp.Handler(),
	}
	for pattern, h := range handlers {
		http.Handle(pattern, h)
	}
	return nil
}
// Setup and serve. Never returns. Handlers need to be registered before.
func SetupAndServe(addr string) error {
srv := &http.Server{
Addr: addr,
ReadTimeout: httpTimeoutRead,
WriteTimeout: httpTimeoutWrite,
Handler: http.TimeoutHandler(LoggingMiddleware(http.DefaultServeMux),
httpTimeoutHandler, "handler timeout"),
}
slog.Info("API listening", slog.String("Address", addr))
return srv.ListenAndServe()
}
func LoggingMiddleware(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
xFwd := r.Header.Get("X-Forwarded-For")
slog.Debug("Forwarding request",
slog.String("RemoteAddr", r.RemoteAddr),
slog.String("For", xFwd),
slog.String("Method", r.Method),
slog.String("URL", r.URL.String()))
handler.ServeHTTP(w, r)
})
}
// ErrResponse reports the statusCode and message as a http response and also
// logs it together with the given details.
func ErrResponse(ctx context.Context, w http.ResponseWriter, statusCode int, message string, details ...slog.Attr) {
finalAttrs := append([]slog.Attr{slog.Group("error",
slog.Int("code", statusCode),
slog.String("message", message),
)}, details...)
slog.LogAttrs(ctx, slog.LevelWarn, "Error response", finalAttrs...)
w.WriteHeader(statusCode)
w.Write([]byte(message))
}
================================================
FILE: src/go/cmd/token-vendor/api/v1/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["v1.go"],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/api/v1",
visibility = ["//visibility:public"],
deps = [
"//src/go/cmd/token-vendor/api:go_default_library",
"//src/go/cmd/token-vendor/app:go_default_library",
"//src/go/cmd/token-vendor/oauth:go_default_library",
"//src/go/cmd/token-vendor/repository:go_default_library",
"@com_github_googlecloudrobotics_ilog//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["v1_test.go"],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//src/go/cmd/token-vendor/app:go_default_library",
"//src/go/cmd/token-vendor/oauth:go_default_library",
"//src/go/cmd/token-vendor/repository/k8s:go_default_library",
"//src/go/cmd/token-vendor/tokensource:go_default_library",
"@io_k8s_api//core/v1:go_default_library",
"@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
"@io_k8s_client_go//kubernetes:go_default_library",
"@io_k8s_client_go//kubernetes/fake:go_default_library",
],
)
================================================
FILE: src/go/cmd/token-vendor/api/v1/testdata/README.md
================================================
# Testdata
## RSA Keys
These files were generated like this:
```shell
openssl genrsa -out rsa_private.pem 2048
openssl rsa -in rsa_private.pem -pubout -out rsa_cert.pem
```
## Create a test JWT
Go to [jwt.io](https://jwt.io/). Use the following header:
```json
{
"alg": "RS256",
"typ": "JWT"
}
```
Use the following payload, replace `${PROJECT}` with your cloud project identifier and update the expire timestamp:
```json
{
"aud": "https://www.endpoints.${PROJECT}.cloud.goog/apis/core.token-vendor/v1/token.oauth2",
"iss": "robot-dev-testuser",
"exp": 1913373010,
"scopes": "unused",
"claims": "unused"
}
```
Use `rsa_cert.pem` and `rsa_private.pem` as keys.
================================================
FILE: src/go/cmd/token-vendor/api/v1/testdata/cloudiot/describe_device.json
================================================
{
"id": "robot-dev-device",
"name": "projects/testproject/locations/europe-west1/registries/cloud-robotics/devices/3072877074145970",
"numId": "3072877074145970",
"credentials": [
{
"publicKey": {
"format": "RSA_PEM",
"key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQAB\n-----END PUBLIC KEY-----"
},
"expirationTime": "1970-01-01T00:00:00Z"
}
],
"config": {
"version": "1",
"cloudUpdateTime": "2022-08-18T15:36:53.627428Z"
},
"gatewayConfig": {}
}
================================================
FILE: src/go/cmd/token-vendor/api/v1/testdata/cloudiot/describe_device_expired_key.json
================================================
{
"id": "robot-dev-device",
"name": "projects/testproject/locations/europe-west1/registries/cloud-robotics/devices/3072877074145970",
"numId": "3072877074145970",
"credentials": [
{
"publicKey": {
"format": "RSA_PEM",
"key": "an_expired_key"
},
"expirationTime": "1990-01-01T00:00:00Z"
}
],
"config": {
"version": "1",
"cloudUpdateTime": "2022-08-18T15:36:53.627428Z"
},
"gatewayConfig": {}
}
================================================
FILE: src/go/cmd/token-vendor/api/v1/testdata/rsa_cert.pem
================================================
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C
2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sM
gyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxX
vGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmz
Q9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard
17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xD
pwIDAQAB
-----END PUBLIC KEY-----
================================================
FILE: src/go/cmd/token-vendor/api/v1/testdata/rsa_private.pem
================================================
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3
OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/
yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pk
dXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22
aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsF
eZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQABAoH/bKMLrT/W4/wT+6PN
KU3FVbWDompywyssqlZ31Q6g9pdCCTIyw0jemlG0ewtdk3yIu8WS0Aku36NudWtP
pvDBPo+CZILRYS9N0AUNXBPl7sUA4OzVdCBnk5FTF1daV7N5CA+ZDXuDVa91fduJ
1ElSF9+weCKph0170Rsc74G570Q1ypoee/gdhkwwK5aYfTs+Z6fpaEnHaPzcwYkF
4QTsCshtoGZslmgZt8Tm7sfDDFWD20fmr1s350Ne1I7VYRFiyGbQI+IB+4pc9LSX
8CHcHIzHidKYTSG6YwpDsNRN/BkQklhsuLnNacMFFddO0IHIS0GlLBJbCRkN3b/n
/XC5AoGBAPZIN3VCpSEAw6OsM1zL4CBcq2dOb5b87rAeUmSkmW415fuyUNJJBcaf
1pliCQNeg9RzRDuHOs6BTU9i+fLcbOwSapFzGxzqnv4xmkHbj1Xs52Z+97HvKKld
xlQ/TF72WGITZVwmQWxJ9Rgx+bi7OirzOtQYoNpFoF5vHgyGrUZ7AoGBAMSosXUk
uLMzrZjH4Oetp8tq9Udyk7Xkk7booU7I0iPb/Dvadsuc9WZI+LP4R3iWmtLcJOUr
WyfliCLvbWtF4aW2vo7hvffe19krg/H26WEuBTuQGCZv8B5o8xHSecb7jbrKt9g6
r8I5kr+2tAZKLC6mtFdJgfSXNO9tveBxe+XFAoGBAIwQljnCJVeXr6wuCygDavv8
uB6QpTYhsz3GgOVsFzZuwNVcnEp77SUBUnL5JlccMa1pwKx6RB+dufIkQDK22duI
vcLqy8iuRq4aV7iMvgAIM7I/E2/GrEFma50OQsjfIXTlwwedWifUB+gyw+sjz/kN
S6/EMfbxEjuixlwpW/JxAoGBAKG5dM44F6hPPFijL0J3XcD8QZ+zCuQPiKZnopgO
sDmLJF/4Za9Gccze/5/I8sWpXMNBBRptUDZ8HTtVmK8aNdm4cfdAj5/y46EVlxl6
Cyy+0tDLzAB4F4h6mEI0y66mmkRdh1jL0lQwUo1Ua7Gsd68Zqr8JlVSWsJKhtf+I
c/JdAoGAFCSDby7ByX0W23Su3R28+9lWRSmNG79kLRLzlXsCwXTUTFh/TjAaEKgK
vwi8dtCSMNnJLCUXGx5cjTndgjTl8Woah0wy9XNNeIUjI8JPxIwXmmjppPKdCBI4
0ZyqQjgPJvwfY7lxFjE10ypv99QDlEbnwngt6bvSkY+6+DQTUDw=
-----END RSA PRIVATE KEY-----
================================================
FILE: src/go/cmd/token-vendor/api/v1/v1.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"bytes"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"log/slog"
"net/http"
"net/url"
"path"
"regexp"
"strings"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/api"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/app"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
"github.com/googlecloudrobotics/ilog"
)
const (
paramDeviceID = "device-id"
contentType = "content-type"
pemFile = "application/x-pem-file"
)
type HandlerContext struct {
tv *app.TokenVendor
}
func NewHandlerContext(tv *app.TokenVendor) *HandlerContext {
return &HandlerContext{tv}
}
// getQueryParam extracts a query parameter from the request URL.
//
// Multiple parameters with the same key are considered undefined and will result in error
func getQueryParam(u *url.URL, param string) (string, error) {
values, ok := u.Query()[param]
if !ok || len(values) != 1 {
err := fmt.Errorf("missing or multiple query parameter %s", param)
return "", err
}
return values[0], nil
}
// Handle requests to configure optional properties of the device
// registration.
//
// Method: POST
// URL parameter: device-id, the string identifier of the device
//
// Body: json {
//	service-account: str, defaults to robot-service@.iam.gserviceaccount.com"
//	service-account-delegate: str, optional intermediate delegate
// }
//
// Response: only an HTTP status code (implicit 200 on success).
func (h *HandlerContext) publicKeyConfigureHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Guard clauses: method, device-id parameter, body readability, JSON shape.
	if r.Method != http.MethodPost {
		api.ErrResponse(ctx, w, http.StatusBadRequest,
			fmt.Sprintf("method %s not allowed, only %s", r.Method, http.MethodPost))
		return
	}
	deviceID, err := getQueryParam(r.URL, paramDeviceID)
	if err != nil {
		api.ErrResponse(ctx, w, http.StatusBadRequest, err.Error())
		return
	}
	if !app.IsValidDeviceID(deviceID) {
		api.ErrResponse(ctx, w, http.StatusBadRequest, "invalid device id", slog.String("DeviceID", deviceID))
		return
	}
	payload, err := io.ReadAll(r.Body)
	if err != nil {
		api.ErrResponse(ctx, w, http.StatusInternalServerError, "failed to read request body", ilog.Err(err), slog.String("DeviceID", deviceID))
		return
	}
	var opts repository.KeyOptions
	if err := json.Unmarshal(payload, &opts); err != nil {
		api.ErrResponse(ctx, w, http.StatusBadRequest, "invalid body", ilog.Err(err), slog.String("DeviceID", deviceID))
		return
	}
	err = h.tv.ConfigurePublicKey(ctx, deviceID, opts)
	if err == nil {
		return // implicit 200 OK
	}
	// Map repository "not found" to 404, everything else to 500.
	code := http.StatusInternalServerError
	if errors.Is(err, repository.ErrNotFound) {
		code = http.StatusNotFound
	}
	api.ErrResponse(ctx, w, code, "request to repository failed", ilog.Err(err), slog.String("DeviceID", deviceID))
}
// Handle requests to read a device's public key by device identifier.
//
// Method: GET
// URL parameter: device-id, the string identifier of the device
//
// Response code: 200 if key has been found
// Response body: A single public key or "" on error.
func (h *HandlerContext) publicKeyReadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Guard clauses: method and device-id parameter.
	if r.Method != http.MethodGet {
		api.ErrResponse(ctx, w, http.StatusBadRequest,
			fmt.Sprintf("method %s not allowed, only %s", r.Method, http.MethodGet))
		return
	}
	deviceID, err := getQueryParam(r.URL, paramDeviceID)
	if err != nil {
		api.ErrResponse(ctx, w, http.StatusBadRequest, err.Error())
		return
	}
	if !app.IsValidDeviceID(deviceID) {
		api.ErrResponse(ctx, w, http.StatusBadRequest, "invalid device id", slog.String("DeviceID", deviceID))
		return
	}
	// Retrieve the public key from the key repository; "not found" maps to
	// 404, other failures to 500.
	publicKey, err := h.tv.ReadPublicKey(ctx, deviceID)
	if err != nil {
		code := http.StatusInternalServerError
		if errors.Is(err, repository.ErrNotFound) {
			code = http.StatusNotFound
		}
		api.ErrResponse(ctx, w, code, "request to repository failed", ilog.Err(err), slog.String("DeviceID", deviceID))
		return
	}
	// for missing public keys (publicKey == "") we return 200 with
	// empty body for conformance with the original token vendor API.
	w.Header().Add(contentType, pemFile)
	w.Write([]byte(publicKey))
}
// Handle requests to publish the public key of a given device identifier.
//
// Method: POST
// URL parameter: device-id, the identifier of the device
// Request body: a single public key to publish
// Response code: 200 if publish succeeded
func (h *HandlerContext) publicKeyPublishHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Guard clauses: method, device-id parameter, body readability, key format.
	if r.Method != http.MethodPost {
		api.ErrResponse(ctx, w, http.StatusBadRequest,
			fmt.Sprintf("method %s not allowed, only %s", r.Method, http.MethodPost))
		return
	}
	deviceID, err := getQueryParam(r.URL, paramDeviceID)
	if err != nil {
		api.ErrResponse(ctx, w, http.StatusBadRequest, err.Error())
		return
	}
	if !app.IsValidDeviceID(deviceID) {
		api.ErrResponse(ctx, w, http.StatusBadRequest, "invalid device id", slog.String("DeviceID", deviceID))
		return
	}
	key, err := io.ReadAll(r.Body)
	if err != nil {
		api.ErrResponse(ctx, w, http.StatusInternalServerError, "failed to read request body", ilog.Err(err), slog.String("DeviceID", deviceID))
		return
	}
	if _, err := isValidPublicKey(key); err != nil {
		api.ErrResponse(ctx, w, http.StatusBadRequest, "public key format error", ilog.Err(err), slog.String("DeviceID", deviceID))
		return
	}
	// Publish the key; success is reported as implicit 200 with empty body.
	if err := h.tv.PublishPublicKey(ctx, deviceID, string(key)); err != nil {
		api.ErrResponse(ctx, w, http.StatusInternalServerError, "publish key failed", ilog.Err(err), slog.String("DeviceID", deviceID))
		return
	}
}
// isValidPublicKey validates the given public key in PEM format.
//
// It checks that the key has a plausible size, starts with a PEM header,
// decodes as a single PEM block of type "PUBLIC KEY" and carries no
// trailing data. The returned error provides details on why the
// validation failed.
func isValidPublicKey(pk []byte) (bool, error) {
	const minSize, maxSize = 100, 18000 // educated guesses, technically unlimited
	if len(pk) < minSize || len(pk) > maxSize {
		return false, fmt.Errorf("invalid key size, assert %d <= %d <= %d", minSize, len(pk), maxSize)
	}
	pk = bytes.TrimSpace(pk)
	var pkStart = []byte("-----BEGIN ")
	if !bytes.HasPrefix(pk, pkStart) {
		// Bug fix: this checks the PEM *prefix*; the old message called it a
		// "suffix" and dumped the entire key into the error.
		return false, fmt.Errorf("public key prefix %q missing", pkStart)
	}
	block, extraData := pem.Decode(pk)
	// Bug fix: check for a decodable block first. On undecodable input
	// pem.Decode returns the whole input as trailing data, which previously
	// produced a misleading "extra data" error instead of a decode error.
	if block == nil || block.Type != "PUBLIC KEY" {
		return false, fmt.Errorf("failed to decode PEM block expecting public key")
	}
	if len(extraData) > 0 {
		return false, fmt.Errorf("public key contains extra data (%d Bytes)", len(extraData))
	}
	return true, nil
}
// tokenOAuth2Handler exchanges a robot-signed JWT for a GCP access token.
//
// The robot identifies itself using its private key to sign a JWT. The JWT is
// verified by the token vendor using the public key from the key repository and
// the device identifier from the `iss` key of the JWT's payload section. After
// successful verification, the token vendor uses its own IAM identity to
// generate an access token for the `robot-service` service account and returns
// the access token to the robot. We only accept RSA as signing algorithm right
// now. This is the default used by the metadata server's oauth package [1].
//
// [1] https://github.com/golang/oauth2/blob/f21342109be17cd214ecfcd33065b79cd571673e/jwt/jwt.go#L29
//
// Method: POST
// The body is formatted like an URL query with two parameters:
//   - grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer
//   - assertion=<the signed JWT>
//
// Signed JWT expected header:
//
//	{
//	  "alg": "RS256",
//	  "typ": "JWT"
//	}
//
// The JWT body is expected to look like this:
//
//	{
//	  "aud": "<audience>", // has to match the one specified in the token vendor config
//	  "iss": "<device identifier>",
//	  "exp": <expiration timestamp>,
//	  "scopes": "...", // unused
//	  "claims": "..."  // unused
//	}
//
// The response body will look like this:
//
//	{
//	  "access_token":"foo", // the cloud access token
//	  "expires_in":3600,    // the cloud access token expiration in seconds from now
//	  "scope":"http://example1.com http://example2.com", // GCP API scope URLs from token vendor config
//	  "token_type":"Bearer" // static
//	}
func (h *HandlerContext) tokenOAuth2Handler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		api.ErrResponse(r.Context(), w, http.StatusBadRequest,
			fmt.Sprintf("method %s not allowed, only %s", r.Method, http.MethodPost))
		return
	}
	// The request body is URL-query encoded, not JSON.
	payload, readErr := io.ReadAll(r.Body)
	if readErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusInternalServerError, "error reading request body", ilog.Err(readErr))
		return
	}
	values, parseErr := url.ParseQuery(string(payload))
	if parseErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusBadRequest, parseErr.Error())
		return
	}
	const paramGrant = "grant_type"
	const jwtGrant = "urn:ietf:params:oauth:grant-type:jwt-bearer"
	if values.Get(paramGrant) != jwtGrant {
		api.ErrResponse(r.Context(), w, http.StatusBadRequest,
			fmt.Sprintf(`expected "%s=%s" in body`, paramGrant, jwtGrant))
		return
	}
	const paramAssert = "assertion"
	assertion := values.Get(paramAssert)
	// Cheap format check before any cryptographic verification.
	if _, jwtErr := isValidJWT(assertion); jwtErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusBadRequest,
			fmt.Sprintf(`expected "%s=" in body, invalid token format: %v`, paramAssert, jwtErr))
		return
	}
	// Delegate JWT verification and token minting to the application layer.
	token, tokenErr := h.tv.GetOAuth2Token(r.Context(), assertion)
	if tokenErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusForbidden, "unable to retrieve cloud access token with given JWT", ilog.Err(tokenErr))
		return
	}
	tokenBytes, marshalErr := json.Marshal(token)
	if marshalErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusInternalServerError, "failed to marshal upstream response", ilog.Err(marshalErr))
		return
	}
	w.Header().Add(contentType, "application/json")
	w.Write(tokenBytes)
}
// verifyJWTHandler checks whether a JWT presented in the Authorization
// header was signed by a device known to the token vendor.
//
// Robots sign JWTs with their local private keys. These get verified against the
// public keys from the keystore. If the key is present and enabled, the token
// vendor will return status code 200.
// This endpoint allows 3rd parties to do a check against the token-vendor before
// the client reaches the token vendor to retrieve an OAuth token.
// It only validates whether the robot is known to the token vendor, there is no
// further authentication or authorization done with this endpoint.
//
// URL: /apis/core.token-vendor/v1/jwt.verify
// Method: GET
// Headers:
//   - Authorization: JWT that allows authorization
//
// Response: only http status code
func (h *HandlerContext) verifyJWTHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		api.ErrResponse(r.Context(), w, http.StatusMethodNotAllowed,
			fmt.Sprintf("method %s not allowed, only %s", r.Method, http.MethodGet))
		return
	}
	authHeader, ok := r.Header["Authorization"]
	if !ok {
		api.ErrResponse(r.Context(), w, http.StatusUnauthorized,
			"request did not provide Authorization header")
		return
	}
	if len(authHeader) != 1 {
		// Bug fix: len(authHeader) is an int, so the previous %q verb rendered
		// it as a quoted rune literal (e.g. '\x02') instead of a count; %d
		// prints the actual number of headers.
		api.ErrResponse(r.Context(), w, http.StatusBadRequest,
			fmt.Sprintf("%d auth headers provided. Only 1 allowed", len(authHeader)))
		return
	}
	// Be slightly permissive here. Allow both forms
	//   Authorization: Bearer <JWT>
	//   Authorization: <JWT>
	jwtString := strings.TrimPrefix(authHeader[0], "Bearer ")
	if _, err := h.tv.ValidateJWT(r.Context(), jwtString); err != nil {
		api.ErrResponse(r.Context(), w, http.StatusForbidden, "JWT not valid", ilog.Err(err))
		return
	}
}
// verifyTokenHandler checks if a given access token has cloud access.
//
// The token is verified by testing if the token has `iam.serviceAccounts.actAs`
// authorization on either the `human-acl` or `robot-service` account by calling
// the GCP testIamPermissions API. This permission can be granted through e.g.
// the Service Account User (roles/iam.serviceAccountUser) role.
//
// Method: GET
// URL Parameters:
//   - robots (optional): "true" to verify against `robot-service`, else `human-acl`
//   - token (optional): access token, if not given via header
//
// Headers (optional): X-Forwarded-Access-Token or Authorization
// See function `tokenFromRequest` for details on how to supply the token.
func (h *HandlerContext) verifyTokenHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		api.ErrResponse(r.Context(), w, http.StatusBadRequest,
			fmt.Sprintf("method %s not allowed, only %s", r.Method, http.MethodGet))
		return
	}
	useRobotACL := testForRobotACL(r.URL)
	accessToken, extractErr := tokenFromRequest(r.URL, &r.Header)
	if extractErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusBadRequest, extractErr.Error())
		return
	}
	if verifyErr := h.tv.VerifyToken(r.Context(), oauth.Token(accessToken), useRobotACL); verifyErr != nil {
		api.ErrResponse(r.Context(), w, http.StatusForbidden, "unable to verify token", ilog.Err(verifyErr))
		return
	}
	w.Write([]byte("OK"))
}
// testForRobotACL reports whether the URL's "robots" parameter is present
// and set to "true".
func testForRobotACL(u *url.URL) bool {
	robots, err := getQueryParam(u, "robots")
	return err == nil && robots == "true"
}
// tokenFromRequest extracts the access token from the request.
//
// The access token can be supplied in one of the following ways, checked in
// the given order. This is based on the specification of the original java
// token vendor.
//  1. Header `X-Forwarded-Access-Token`: token without prefix
//  2. Header `Authorization`: token with "Bearer " prefix
//  3. URL parameter `token`
func tokenFromRequest(u *url.URL, h *http.Header) (string, error) {
	const fwdToken = "X-Forwarded-Access-Token"
	const authHeader, authPrefix = "Authorization", "Bearer "
	// 1. Forwarded-token header carries the token verbatim.
	if fwd := h.Get(fwdToken); fwd != "" {
		if _, err := isValidToken(fwd); err != nil {
			return "", err
		}
		return fwd, nil
	}
	// 2. Authorization header must carry the "Bearer " prefix.
	if auth := h.Get(authHeader); auth != "" {
		if !strings.HasPrefix(auth, authPrefix) {
			return "", fmt.Errorf("token in header %q has no prefix %q",
				authHeader, authPrefix)
		}
		bearer := strings.TrimPrefix(auth, authPrefix)
		if _, err := isValidToken(bearer); err != nil {
			return "", err
		}
		return bearer, nil
	}
	// 3. Fall back to the URL query parameter.
	const paramToken = "token"
	fromURL, err := getQueryParam(u, paramToken)
	if err != nil {
		return "", fmt.Errorf("no token in headers %q or %q and unable to get token from URL param %q: %v",
			fwdToken, authHeader, paramToken, err)
	}
	if _, err := isValidToken(fromURL); err != nil {
		return "", err
	}
	return fromURL, nil
}
const tokenRegex = `^ya29\.[a-zA-Z0-9\.\-_]+$`
var tokenMatch = regexp.MustCompile(tokenRegex).MatchString
// isValidToken verifies the format of the token string.
//
// The returned error provides details on why the validation failed.
func isValidToken(token string) (bool, error) {
// Minimum length is dummy, maximum from documentation
// Source: https://cloud.google.com/iam/docs/reference/sts/rest/v1/TopLevel/token#response-body
const minSize, maxSize = 100, 12288
if len(token) < minSize || len(token) > maxSize {
return false, fmt.Errorf("invalid token size, assert %d <= %d <= %d",
minSize, len(token), maxSize)
}
if !tokenMatch(token) {
return false, fmt.Errorf("token failed validation against %q", tokenRegex)
}
return true, nil
}
const jwtRegex = `^[a-zA-Z0-9\.\-_]+\.[a-zA-Z0-9\.\-_]+\.[a-zA-Z0-9\.\-_]+$`
var jwtMatch = regexp.MustCompile(jwtRegex).MatchString
// isValidJWT verifies the format of an encoded JWT.
//
// The returned error provides details on why the validation failed.
func isValidJWT(jwt string) (bool, error) {
const minSize, maxSize = 100, 5000 // guess
if len(jwt) < minSize || len(jwt) > maxSize {
return false, fmt.Errorf("invalid size, assert %d <= %d <= %d",
minSize, len(jwt), maxSize)
}
if !jwtMatch(jwt) {
return false, fmt.Errorf("jwt failed validation against %q", jwtRegex)
}
return true, nil
}
// Register mounts the API V1 handler functions on the default
// http.DefaultServeMux under the given path prefix.
func Register(tv *app.TokenVendor, prefix string) error {
	slog.Debug("mounting API V1", slog.String("Prefix", prefix))
	h := NewHandlerContext(tv)
	routes := []struct {
		endpoint string
		handler  http.HandlerFunc
	}{
		{"public-key.configure", h.publicKeyConfigureHandler},
		{"public-key.read", h.publicKeyReadHandler},
		{"public-key.publish", h.publicKeyPublishHandler},
		{"token.oauth2", h.tokenOAuth2Handler},
		{"token.verify", h.verifyTokenHandler},
		{"jwt.verify", h.verifyJWTHandler},
	}
	for _, route := range routes {
		http.HandleFunc(path.Join(prefix, route.endpoint), route.handler)
	}
	return nil
}
================================================
FILE: src/go/cmd/token-vendor/api/v1/v1_test.go
================================================
package v1
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"strconv"
"strings"
"testing"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/app"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository/k8s"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/tokensource"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
const (
	// Path to a PEM-encoded public key fixture used by the tests.
	testPubKey = "testdata/rsa_cert.pem"
	// Request-body prefix for the OAuth2 JWT-bearer grant; the signed JWT
	// (assertion) is appended directly to it.
	jwtBodyPrefix = "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion="
	// Service account the token vendor mints access tokens for.
	saName = "robot-service@testproject.iam.gserviceaccount.com"
	// Pre-signed JWT fixtures. jwtCorrect verifies against the public key
	// registered for the device in runTokenOAuth2HandlerTestWithK8s; the
	// others each break one aspect of the token (signature, audience, expiry).
	jwtCorrect  = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJ0ZXN0YXVkIiwiaXNzIjoicm9ib3QtZGV2LXRlc3R1c2VyIiwiZXhwIjoxOTEzMzczMDEwLCJzY29wZXMiOiJ0ZXN0c2NvcGVzIiwiY2xhaW1zIjoidGVzdGNsYWltcyJ9.WJP0shiqynW9ZrmV4k78W3_nn_YA86XLK58IJYyqUF-8LAG92MraNqVqD0t6i-s90VBL64hCXlsA7zP3WlsMHOEvXCyRkGffhbJNIlJqIVTVfGvyF-ZmuaAr352n5kmKTrfTRi7h9LWTcvDgSosN438J8Jy9BT1FE9P-BHfyBUegZ15DWFAiAhz0r_Fgj7hAMXUnRdZfj3_dE0Nhi5IGs3L-0XzU-dE150ZJvtGMdIjc_QCqYHV3wtSgETKDYQoonD08n6g5GqC8nNkqrWFMttafLdPaDAsr8KWtj1dD1w9sw1YJClEzF9JOc63WNPZf8CgdU2enFW-V-2vHbUaekg"
	// Valid-looking JWT signed with a different (random) private key.
	jwtWrongSig = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJ0ZXN0YXVkIiwiaXNzIjoicm9ib3QtZGV2LXRlc3R1c2VyIiwiZXhwIjoxOTEzMzczMDEwLCJzY29wZXMiOiIuLi4iLCJjbGFpbXMiOiIuLi4ifQ.krAYHjkConzVudfXJUMiDNbVHF3RwkvOAhSCyTvOaJdlJ6sxh-TjPXo6W0yVT31qjLwhl1NYI-JlhcHX7TLiZbLCbGVXlQN2Nn4LvpbGdAH0KvSJkthqX7ld9tlVQGdlOUHCE5bBDG_9uBtpdOAv1zKUTquhyDM0qWVrQV1qUVOtwBCO6nt21l1eXgTwz50FVN33f1ZmhZfHW1u7Dq_XwBJmHFwN3aiD0NZohU7MpQiz-0u94Q9yZ588IjdZEUhSEUKrVtJjoPcxDhrXxoRMA8iP8_bMeOHteiAdYeBVBwFhu1d8pfcn6uoZROYD1xB1LWDTJx4GfQh6v3wtAwFu7Q"
	// JWT whose "aud" claim does not match the configured audience.
	jwtWrongAud = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhYmMiLCJpc3MiOiJyb2JvdC1kZXYtdGVzdHVzZXIiLCJleHAiOjE5MTMzNzMwMTAsInNjb3BlcyI6Ii4uLiIsImNsYWltcyI6Ii4uLiJ9.XIoSfJl7QE51XUt7XHvZTomuXAAjVKWhnBhCgZl91-dGO9aF_pVu9sc_kR-MODoZci9pUKaLfqLTbZkNgkwGvApXF4GZ1DBu0uG6ewbNzIA-2l67xztnGw_M5DrQpLnq31HT1hRlvB9cXOYj2qtVfQaOhZtSPeHviYXj1NiPzHIWdyZKGIYu-gofkAZACEKKDd8HBRv6bLOzgrJ9sxlsyIB_O-FzpgoGSH-bKj9QEbSazx1j7AdICq1pJ_ER9ovb0qcYqg1JPToeEB1L-GFGwZp2JAnVp2rbbwPfjQTVlGmmAu-NUA5SjbjrNSjwDnQZDBBhmx75uToptJsnC_xZAw"
	// Correctly signed JWT with "exp" set to 0 (already expired).
	jwtExpired = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJ0ZXN0YXVkIiwiaXNzIjoicm9ib3QtZGV2LXRlc3R1c2VyIiwiZXhwIjowLCJzY29wZXMiOiJ0ZXN0c2NvcGVzIiwiY2xhaW1zIjoidGVzdGNsYWltcyJ9.VgalRggp3RrwarTeNSZu-lYcjSOyH7S7g_6BIxV_RisRavbwr1liTUXEKA1fjx5zF_1I_dsPC64wXkK14lAmJFI4pMm8-oLXMgSyUOGAqicYGrRm-CeZ_xJmA37ZCKyyf7ijGCdaAqNbtnsSER3wHTIG7ccbpcEUvb57nCQnTBzlEAVqDFXh9D-7Md2SUmWXCvmWomkALnPPg1xeeWjQygQvmbvFOo37ZgD-GbuvEYr2ccc1otJVvGtpSdJmFc1fGOWy9ZkPMR9VZyrmapYlImZTX7yOfcP-TLcbKRQegt3JJKgvffRP1dhxZaB5fTwT5o5ZTTh7aLap-MUUoZN4bg"
)
// RoundTripFunc lets a plain function act as an http.RoundTripper so tests
// can stub out HTTP transport behavior without network access.
type RoundTripFunc func(req *http.Request) *http.Response

// RoundTrip implements http.RoundTripper by delegating to the function itself.
func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req), nil
}

// NewTestHTTPClient returns an http.Client whose transport is the given stub
// function, so all requests are answered locally.
func NewTestHTTPClient(fn RoundTripFunc) *http.Client {
	return &http.Client{Transport: fn}
}
// publicKeyConfigureHandlerK8sTest describes one table-driven case for
// TestPublicKeyConfigureHandlerWithK8s.
type publicKeyConfigureHandlerK8sTest struct {
	desc           string              // human-readable case name
	configmaps     []*corev1.ConfigMap // pre-existing ConfigMaps in the fake cluster
	method         string              // HTTP method used for the request
	deviceID       string              // value of the device-id URL parameter
	body           string              // request body
	wantStatusCode int                 // expected HTTP response code
}
// TestPublicKeyConfigureHandlerWithK8s exercises publicKeyConfigureHandler
// against a fake in-memory Kubernetes cluster.
func TestPublicKeyConfigureHandlerWithK8s(t *testing.T) {
	var cases = []publicKeyConfigureHandlerK8sTest{
		{
			desc: "happy case",
			configmaps: []*corev1.ConfigMap{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "ConfigMap",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      "testdevice",
						Namespace: "default",
					},
					Data: map[string]string{"pubKey": "testkey"},
				},
			},
			method:         http.MethodPost,
			deviceID:       "testdevice",
			body:           "{}",
			wantStatusCode: http.StatusOK,
		},
		{
			desc:           "wrong method, bad request",
			method:         http.MethodGet,
			deviceID:       "testdevice",
			wantStatusCode: http.StatusBadRequest,
		},
		{
			desc:           "missing device-id, bad request",
			method:         http.MethodPost,
			body:           "{}",
			wantStatusCode: http.StatusBadRequest,
		},
		{
			// no ConfigMap exists for the device in this case, so the
			// repository reports it as unknown
			desc:           "wrong device-id, not found",
			method:         http.MethodPost,
			deviceID:       "testdevice",
			body:           "{}",
			wantStatusCode: http.StatusNotFound,
		},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			runPublicKeyConfigureHandlerWithK8sCase(t, &test)
		})
	}
}
// runPublicKeyConfigureHandlerWithK8sCase runs a single
// publicKeyConfigureHandler case against a fake Kubernetes backend and
// checks the response status code.
func runPublicKeyConfigureHandlerWithK8sCase(t *testing.T, test *publicKeyConfigureHandlerK8sTest) {
	// Setup fake K8s environment
	cs := fake.NewSimpleClientset()
	if err := populateK8sEnv(cs, "default", test.configmaps); err != nil {
		t.Fatal(err)
	}
	kcl, err := k8s.NewK8sRepository(context.TODO(), cs, "default")
	if err != nil {
		t.Fatal(err)
	}
	// Setup app & API handler
	tv, err := app.NewTokenVendor(context.TODO(), kcl, nil, nil, "aud", saName)
	if err != nil {
		t.Fatal(err)
	}
	h := HandlerContext{tv: tv}
	// Make API call with the device-id as URL query parameter
	rr := httptest.NewRecorder()
	req := mustNewRequest(t, test.method, "/anything", strings.NewReader(test.body))
	q := req.URL.Query()
	q.Add("device-id", test.deviceID)
	req.URL.RawQuery = q.Encode()
	h.publicKeyConfigureHandler(rr, req)
	// check API response
	if rr.Code != test.wantStatusCode {
		t.Errorf("wrong status code, is %d, want %d", rr.Code, test.wantStatusCode)
	}
	// error responses carry no body worth checking
	if rr.Code != http.StatusOK {
		return
	}
}
// publicKeyReadHandlerK8sTest describes one table-driven case for
// TestPublicKeyReadHandlerWithK8s.
type publicKeyReadHandlerK8sTest struct {
	desc           string              // human-readable case name
	configmaps     []*corev1.ConfigMap // pre-existing ConfigMaps in the fake cluster
	deviceID       string              // value of the device-id URL parameter
	wantKey        string              // expected public key in the response body
	wantStatusCode int                 // expected HTTP response code
}
// TestPublicKeyReadHandlerWithK8s exercises publicKeyReadHandler against a
// fake in-memory Kubernetes cluster.
func TestPublicKeyReadHandlerWithK8s(t *testing.T) {
	var cases = []publicKeyReadHandlerK8sTest{
		{
			"key_found",
			[]*corev1.ConfigMap{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "ConfigMap",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      "testdevice",
						Namespace: "default",
					},
					Data: map[string]string{"pubKey": "testkey"},
				},
			},
			"testdevice",
			"testkey",
			http.StatusOK,
		},
		// an unknown device (no ConfigMap) yields 404 with an empty body
		{
			"key_not_found",
			[]*corev1.ConfigMap{},
			"testdevice",
			"",
			http.StatusNotFound,
		},
		// for malformed configmaps we expect an error
		{
			"malformed_configmap",
			[]*corev1.ConfigMap{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "ConfigMap",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      "testdevice",
						Namespace: "default",
					},
					// missing Data field
				},
			},
			"testdevice",
			"",
			http.StatusInternalServerError,
		},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			runPublicKeyReadHandlerWithK8sCase(t, &test)
		})
	}
}
// populateK8sEnv creates the given ConfigMaps in namespace ns of the
// provided (fake) Kubernetes client.
func populateK8sEnv(env kubernetes.Interface, ns string, maps []*corev1.ConfigMap) error {
	cms := env.CoreV1().ConfigMaps(ns)
	for _, cm := range maps {
		_, err := cms.Create(context.TODO(), cm, metav1.CreateOptions{})
		if err != nil {
			return err
		}
	}
	return nil
}
// runPublicKeyReadHandlerWithK8sCase runs a single publicKeyReadHandler case
// against a fake Kubernetes backend, checking status code and response body.
func runPublicKeyReadHandlerWithK8sCase(t *testing.T, test *publicKeyReadHandlerK8sTest) {
	// Setup fake K8s environment
	cs := fake.NewSimpleClientset()
	if err := populateK8sEnv(cs, "default", test.configmaps); err != nil {
		t.Fatal(err)
	}
	kcl, err := k8s.NewK8sRepository(context.TODO(), cs, "default")
	if err != nil {
		t.Fatal(err)
	}
	// Setup app & API handler
	tv, err := app.NewTokenVendor(context.TODO(), kcl, nil, nil, "aud", saName)
	if err != nil {
		t.Fatal(err)
	}
	h := HandlerContext{tv: tv}
	// Make API call with the device-id as URL query parameter
	rr := httptest.NewRecorder()
	req := mustNewRequest(t, "GET", "/anything", nil)
	q := req.URL.Query()
	q.Add("device-id", test.deviceID)
	req.URL.RawQuery = q.Encode()
	h.publicKeyReadHandler(rr, req)
	// check API response
	if rr.Code != test.wantStatusCode {
		t.Errorf("wrong status code, is %d, want %d", rr.Code, test.wantStatusCode)
	}
	// only successful responses carry a key in the body
	if rr.Code != http.StatusOK {
		return
	}
	body, err := io.ReadAll(rr.Body)
	if err != nil {
		t.Fatal(err)
	}
	gotKey := string(body)
	if gotKey != test.wantKey {
		t.Errorf("readHandler(..) = %v, want %v", gotKey, test.wantKey)
	}
}
// mustRespBodyFromFile reads the given file and wraps its content as an
// HTTP response body. Fails the test on any read error.
func mustRespBodyFromFile(t *testing.T, file string) io.ReadCloser {
	content, err := os.ReadFile(file)
	if err != nil {
		t.Fatal(err.Error())
	}
	return io.NopCloser(bytes.NewBuffer(content))
}
// mustNewRequest builds an *http.Request or fails the test.
func mustNewRequest(t *testing.T, method, url string, body io.Reader) *http.Request {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		t.Fatal(err.Error())
	}
	return req
}
// mustFileOpen opens the named file for reading or fails the test.
func mustFileOpen(t *testing.T, name string) io.Reader {
	fh, err := os.Open(name)
	if err != nil {
		t.Fatal(err.Error())
	}
	return fh
}
// mustFileToString returns the content of the named file as a string or
// fails the test.
func mustFileToString(t *testing.T, name string) string {
	fp := mustFileOpen(t, name)
	// Fix: the local was named `bytes`, shadowing the imported bytes package
	// within this function.
	content, err := io.ReadAll(fp)
	if err != nil {
		t.Fatal(err.Error())
	}
	return string(content)
}
// publicKeyPublishHandlerK8sTest describes one table-driven case for
// TestPublicKeyPublishHandlerWithK8s.
type publicKeyPublishHandlerK8sTest struct {
	desc       string              // human-readable case name
	configmaps []*corev1.ConfigMap // pre-existing ConfigMaps in the fake cluster
	deviceID   string              // value of the device-id URL parameter
	// read key before publish
	wantKeyBefore string
	// publish and read key again
	body    io.Reader // request body holding the key to publish
	wantKey string    // expected key after publishing
}
// TestPublicKeyPublishHandlerWithK8s exercises publicKeyPublishHandler
// against a fake in-memory Kubernetes cluster, covering both first-time
// registration and key rotation of an existing device.
func TestPublicKeyPublishHandlerWithK8s(t *testing.T) {
	var cases = []publicKeyPublishHandlerK8sTest{
		{
			// happy path where no device is registered yet
			"register_new_device",
			[]*corev1.ConfigMap{},
			"testdevice",
			"",
			mustFileOpen(t, path.Join("testdata", "rsa_cert.pem")),
			mustFileToString(t, path.Join("testdata", "rsa_cert.pem")),
		},
		{
			// happy path where the device is already registered
			"update_device_key",
			[]*corev1.ConfigMap{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "ConfigMap",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name: "testdevice",
					},
					Data: map[string]string{"pubKey": "testkey"},
				},
			},
			"testdevice",
			"testkey",
			mustFileOpen(t, path.Join("testdata", "rsa_cert.pem")),
			mustFileToString(t, path.Join("testdata", "rsa_cert.pem")),
		},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			runPublicKeyPublishHandlerWithK8sCase(t, &test)
		})
	}
}
// runPublicKeyPublishHandlerWithK8sCase runs one publish-handler case:
// it reads the device key before publishing, publishes the new key via the
// handler, then reads the key back and compares it to the expectation.
func runPublicKeyPublishHandlerWithK8sCase(t *testing.T, test *publicKeyPublishHandlerK8sTest) {
	// Setup fake K8s environment
	cs := fake.NewSimpleClientset()
	if err := populateK8sEnv(cs, "default", test.configmaps); err != nil {
		t.Fatal(err)
	}
	kcl, err := k8s.NewK8sRepository(context.TODO(), cs, "default")
	if err != nil {
		t.Fatal(err)
	}
	// Setup app & API handler
	tv, err := app.NewTokenVendor(context.TODO(), kcl, nil, nil, "aud", saName)
	if err != nil {
		t.Fatal(err)
	}
	h := HandlerContext{tv: tv}
	// Read the current device key; 404 is acceptable for a yet-unknown device.
	rr := httptest.NewRecorder()
	req := mustNewRequest(t, "GET", "/anything", nil)
	q := req.URL.Query()
	q.Add("device-id", test.deviceID)
	req.URL.RawQuery = q.Encode()
	h.publicKeyReadHandler(rr, req)
	if rr.Code != http.StatusOK && rr.Code != http.StatusNotFound {
		t.Errorf("before update,publicKeyReadHandler(..): wrong status code, got %d, want %d/%d",
			rr.Code, http.StatusOK, http.StatusNotFound)
	}
	body, err := io.ReadAll(rr.Body)
	if err != nil {
		t.Fatal(err)
	}
	if rr.Code == http.StatusOK {
		gotKey := string(body)
		if gotKey != test.wantKeyBefore {
			// Bug fix: the message previously printed test.wantKey although
			// the comparison uses the pre-publish expectation wantKeyBefore.
			t.Errorf("before update,publicKeyReadHandler(..): wrong key, got %v, want %v",
				gotKey, test.wantKeyBefore)
		}
	}
	// POST a new key
	rr = httptest.NewRecorder()
	req = mustNewRequest(t, "POST", "/anything", test.body)
	q = req.URL.Query()
	q.Add("device-id", test.deviceID)
	req.URL.RawQuery = q.Encode()
	h.publicKeyPublishHandler(rr, req)
	// check API response
	if rr.Code != http.StatusOK {
		t.Errorf("publicKeyPublishHandler(..): wrong status code %d, want %d", rr.Code, http.StatusOK)
	}
	// Read key back again
	rr = httptest.NewRecorder()
	req = mustNewRequest(t, "GET", "/anything", nil)
	q = req.URL.Query()
	q.Add("device-id", test.deviceID)
	req.URL.RawQuery = q.Encode()
	h.publicKeyReadHandler(rr, req)
	if rr.Code != http.StatusOK {
		t.Errorf("after update,publicKeyReadHandler(..): wrong status code, got %d, want %d",
			rr.Code, http.StatusOK)
	}
	body, err = io.ReadAll(rr.Body)
	if err != nil {
		t.Fatal(err)
	}
	gotKey := string(body)
	if gotKey != test.wantKey {
		t.Errorf("after update,publicKeyReadHandler(..): wrong key, got %v, want %v",
			gotKey, test.wantKey)
	}
}
// isValidPublicKeyTest is a table-driven case for TestIsValidPublicKey.
type isValidPublicKeyTest struct {
	desc    string // human-readable case name
	pk      []byte // candidate public key
	isValid bool   // expected validation outcome
}
// TestIsValidPublicKey checks isValidPublicKey against the testdata key
// fixture and an obviously invalid string.
func TestIsValidPublicKey(t *testing.T) {
	var cases = []isValidPublicKeyTest{
		{"testdata key", []byte(mustFileToString(t, testPubKey)), true},
		{"junk string", []byte("some junk string"), false},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			gotValid, err := isValidPublicKey(test.pk)
			if test.isValid != gotValid {
				t.Errorf("isValidPublicKey(..): is %v, got %v", test.isValid, gotValid)
			}
			// a valid key must yield no error, an invalid one must
			if test.isValid && err != nil || !test.isValid && err == nil {
				t.Errorf("isValidPublicKey(..): is %v, but got error %v", test.isValid, err)
			}
		})
	}
}
// isValidTokenTest is a table-driven case for TestIsValidToken.
type isValidTokenTest struct {
	desc    string // human-readable case name
	token   string // candidate access token
	isValid bool   // expected validation outcome
}
// TestIsValidToken checks isValidToken's size, prefix and character-set
// validation.
func TestIsValidToken(t *testing.T) {
	// p is the fixed GCP access token prefix.
	const p = "ya29."
	var cases = []isValidTokenTest{
		// valid
		{"valid", p + "a_bc-D0348" + strings.Repeat("a", 100), true},
		//invalid
		{"empty", "", false}, {"too short", "abc", false},
		{"wrong prefix", "ya244" + strings.Repeat("a", 100), false},
		{"new line", p + strings.Repeat("a", 100) + "\n" + p + strings.Repeat("a", 100), false},
		{"wrong characters", p + strings.Repeat("a", 100) + "#!", false},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			gotValid, gotErr := isValidToken(test.token)
			if gotValid != test.isValid {
				t.Errorf("isValidToken(%q): is %v, got %v", test.token, test.isValid, gotValid)
				return
			}
			// validity and error must agree
			if (test.isValid && gotErr != nil) || (!test.isValid && gotErr == nil) {
				t.Errorf("isValidToken(%q): is %v, but got error %v",
					test.token, test.isValid, gotErr)
			}
		})
	}
}
// isValidJWTTest is a table-driven case for TestIsValidJWT.
type isValidJWTTest struct {
	desc      string // human-readable case name
	jwt       string // candidate encoded JWT
	wantValid bool   // expected validation outcome
}
// TestIsValidJWT checks isValidJWT's size, structure (three dot-separated
// segments) and character-set validation.
func TestIsValidJWT(t *testing.T) {
	var cases = []isValidJWTTest{
		// valid
		{"valid", strings.Repeat("a", 100) + "." + strings.Repeat("a", 100) +
			"." + strings.Repeat("a", 100), true},
		//invalid
		{"empty", "", false}, {"too short", "abc", false}, {"no dots", strings.Repeat("a", 100), false},
		{"new line", strings.Repeat("a", 100) + "\n" + strings.Repeat("a", 100), false},
		{"wrong characters", strings.Repeat("a", 100) + "#!", false},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			gotValid, gotErr := isValidJWT(test.jwt)
			if gotValid != test.wantValid {
				t.Errorf("isValidJWT(%q): got %v, want %v, error %v",
					test.jwt, gotValid, test.wantValid, gotErr)
				return
			}
			// validity and error must agree
			if (test.wantValid && gotErr != nil) || (!test.wantValid && gotErr == nil) {
				t.Errorf("isValidJWT(%q): got error %v, want %v",
					test.jwt, gotErr, test.wantValid)
			}
		})
	}
}
// tokenFromRequestTest is a table-driven case for TestTokenFromRequest.
type tokenFromRequestTest struct {
	desc    string      // human-readable case name
	h       http.Header // request headers to extract the token from
	u       *url.URL    // request URL, may carry a token query parameter
	isToken string      // expected extracted token (unchecked when isErr is true)
	isErr   bool        // whether an extraction error is expected
}
// TestTokenFromRequest checks token extraction from the forwarded-token
// header, the Authorization header and the URL parameter.
func TestTokenFromRequest(t *testing.T) {
	validToken := `ya29.a_bc-d` + strings.Repeat("a", 100)
	validUrl, _ := url.Parse("http://127.0.0.1:80/?token=" + validToken)
	var cases = []tokenFromRequestTest{
		{
			"token in auth header",
			http.Header{"Authorization": {"Bearer " + validToken}},
			&url.URL{},
			validToken,
			false,
		},
		{
			"invalid auth header",
			http.Header{"Authorization": {"SomethingElse: " + validToken}},
			&url.URL{},
			"",
			true,
		},
		{
			"token in x forwarded header",
			http.Header{"X-Forwarded-Access-Token": {validToken}},
			&url.URL{},
			validToken,
			false,
		},
		{
			"invalid token in x forwarded header",
			http.Header{"X-Forwarded-Access-Token": {"InvalidTokenString"}},
			&url.URL{},
			validToken,
			true,
		},
		{
			"token in URL",
			http.Header{},
			validUrl,
			validToken,
			false,
		},
		{
			"no token",
			http.Header{},
			&url.URL{},
			"",
			true,
		},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			gotToken, gotErr := tokenFromRequest(test.u, &test.h)
			if test.isErr && gotErr == nil || !test.isErr && gotErr != nil {
				t.Errorf("tokenFromRequest(..): error is %v, but got error %v", test.isErr, gotErr)
				return
			}
			// the extracted token is only meaningful for error-free cases
			if gotErr != nil {
				return
			}
			if gotToken != test.isToken {
				t.Errorf("tokenFromRequest(..): is %q, got %q", test.isToken, gotToken)
				return
			}
		})
	}
}
// VerifyTokenHandlerTest describes one table-driven case for
// TestVerifyTokenHandler, covering the round trip from the handler through
// a faked GCP IAM testIamPermissions call and back.
type VerifyTokenHandlerTest struct {
	desc string // human-readable case name
	// handler request variables (test -> handler)
	reqIsRobots bool // value of the "robots" URL parameter
	// IAM request variables (token verifier -> GCP IAM)
	iamReqIsUrl string // expected testIamPermissions URL
	// fake IAM response variables (GCP IAM -> token verifier)
	iamRespPermissions string // JSON body returned by the fake IAM API
	iamRespStatusCode  int    // status code returned by the fake IAM API
	// handler response variables (handler -> test)
	handlerIsStatusCode int // expected handler status code
}
// TestVerifyTokenHandler exercises verifyTokenHandler against a faked GCP
// IAM testIamPermissions endpoint for both the human and the robot ACL.
func TestVerifyTokenHandler(t *testing.T) {
	const permHappy = `{"permissions":["iam.serviceAccounts.actAs"]}`
	const permBad = `{"permissions":[]}`
	const isUrlRobotsACL = "https://iam.googleapis.com/v1/projects/testproject/serviceAccounts/robot-service@testproject.iam.gserviceaccount.com:testIamPermissions?alt=json&prettyPrint=false"
	const isUrlHumanACL = "https://iam.googleapis.com/v1/projects/testproject/serviceAccounts/human-acl@testproject.iam.gserviceaccount.com:testIamPermissions?alt=json&prettyPrint=false"
	var cases = []VerifyTokenHandlerTest{
		{"human-acl happy path", false, isUrlHumanACL, permHappy, http.StatusOK, http.StatusOK},
		{"human-acl missing permission", false, isUrlHumanACL, permBad, http.StatusOK, http.StatusForbidden},
		{"robots-service happy path", true, isUrlRobotsACL, permHappy, http.StatusOK, http.StatusOK},
		{"error from GCP IAM", true, isUrlRobotsACL, permHappy, http.StatusBadRequest, http.StatusForbidden},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			runVerifyTokenHandlerTest(t, &test)
		})
	}
}
// runVerifyTokenHandlerTest runs one verifyTokenHandler case: it wires the
// handler to a fake IAM HTTP backend, sends an authorized GET request and
// checks the resulting status code.
func runVerifyTokenHandlerTest(t *testing.T, test *VerifyTokenHandlerTest) {
	// a syntactically valid access token for the request
	isToken := "ya29." + strings.Repeat("a", 100)
	// fake IAM response; also asserts the outgoing IAM request is well-formed
	fakeIAMHandler := func(req *http.Request) *http.Response {
		if req.Method != http.MethodPost {
			t.Fatalf("unexpected request method %q", req.Method)
		}
		if req.URL.String() != test.iamReqIsUrl {
			t.Fatalf("wrong POST URL, is %q, got %q", test.iamReqIsUrl, req.URL)
		}
		auth := req.Header.Get("Authorization")
		if !strings.HasPrefix(auth, "Bearer ") {
			t.Fatal("missing auth bearer prefix")
		}
		gotToken := strings.TrimPrefix(auth, "Bearer ")
		if isToken != gotToken {
			t.Fatalf("wrong token, is %q, got %q", isToken, gotToken)
		}
		// only check if the permission is in the request body and not unmarshal the whole json
		body, err := io.ReadAll(req.Body)
		if err != nil {
			t.Fatal(err.Error())
		}
		if !strings.Contains(string(body), "iam.serviceAccounts.actAs") {
			t.Fatalf("request does not contain expected permission")
		}
		// respond with the requested permissions
		return &http.Response{
			StatusCode: test.iamRespStatusCode,
			Body:       io.NopCloser(strings.NewReader(test.iamRespPermissions)),
			Header:     make(http.Header),
		}
	}
	// setup app and http client
	client := NewTestHTTPClient(fakeIAMHandler)
	tver, err := oauth.NewTokenVerifier(context.TODO(), client, "testproject")
	if err != nil {
		t.Fatal(err.Error())
	}
	tv, err := app.NewTokenVendor(context.TODO(), nil, tver, nil, "aud", saName)
	if err != nil {
		t.Fatal(err.Error())
	}
	h := &HandlerContext{tv: tv}
	// make request to the handler
	rr := httptest.NewRecorder()
	req := mustNewRequest(t, "GET", "/anything", nil)
	req.Header.Add("Authorization", "Bearer "+isToken)
	q := req.URL.Query()
	q.Add("robots", strconv.FormatBool(test.reqIsRobots))
	req.URL.RawQuery = q.Encode()
	h.verifyTokenHandler(rr, req)
	// check response
	if rr.Code != test.handlerIsStatusCode {
		t.Errorf("wrong status code, is %d, got %d", test.handlerIsStatusCode, rr.Code)
	}
}
// TokenOAuth2HandlerTest describes one case for the tokenOAuth2Handler
// tests, covering the full exchange of a JWT assertion for a cloud token.
type TokenOAuth2HandlerTest struct {
	desc string // human-readable case name
	// token vendor config
	acceptedAud string   // audience the token vendor accepts
	scopes      []string // GCP API scopes configured for minted tokens
	// test -> handler
	body string // URL-query-encoded request body (grant_type + assertion)
	// fake GCP -> Token Vendor
	token  string // access token returned by the fake IAM API
	expire string // expiry timestamp returned by the fake IAM API
	// handler -> test
	wantStatusCode int // expected handler status code
}
// we create a happy test first and afterwards variations of it
var TokenOAuth2HandlerTestHappyPath = TokenOAuth2HandlerTest{
	desc:        "happy path",
	acceptedAud: "testaud",
	scopes:      []string{"abc", "def"},
	// token defined in oauth/jwt/jwt_test.go
	body:  jwtBodyPrefix + jwtCorrect,
	token: "abc",
	// expire needs to be the same across all tests because checked the same across all tests
	expire:         "2100-06-30T15:01:23.045123456Z",
	wantStatusCode: 200,
}
// TestTokenOAuth2HandlerHappyPath runs the happy-path token exchange against
// the K8s-backed setup. (Fixes the "Hapy" typo in the test name; Go test
// functions are discovered by name, so no caller is affected.)
func TestTokenOAuth2HandlerHappyPath(t *testing.T) {
	t.Run("with_k8s", func(t *testing.T) {
		runTokenOAuth2HandlerTestWithK8s(t, TokenOAuth2HandlerTestHappyPath)
	})
}
// TestTokenOAuth2HandlerDifferentPrivateKey expects a 403 when the JWT's
// signature does not match the device's registered public key.
func TestTokenOAuth2HandlerDifferentPrivateKey(t *testing.T) {
	test := TokenOAuth2HandlerTestHappyPath
	test.desc = "JWT signed with different private key"
	// JWT is valid but signed with a different (random) private key not matching
	// the one returned from the registry for the given device
	test.body = jwtBodyPrefix + jwtWrongSig
	test.wantStatusCode = 403
	t.Run("with_k8s", func(t *testing.T) {
		runTokenOAuth2HandlerTestWithK8s(t, test)
	})
}
// TestTokenOAuth2HandlerWrongAud expects a 403 when the JWT's audience does
// not match the token vendor's accepted audience.
func TestTokenOAuth2HandlerWrongAud(t *testing.T) {
	test := TokenOAuth2HandlerTestHappyPath
	test.desc = "invalid JWT (junk audience)"
	// JWT "aud" is changed to "abc"
	test.body = jwtBodyPrefix + jwtWrongAud
	test.wantStatusCode = 403
	t.Run("with_k8s", func(t *testing.T) {
		runTokenOAuth2HandlerTestWithK8s(t, test)
	})
}
// TestTokenOAuth2HandlerExpired expects a 403 when the JWT is correctly
// signed but expired.
func TestTokenOAuth2HandlerExpired(t *testing.T) {
	test := TokenOAuth2HandlerTestHappyPath
	test.desc = "JWT signed correctly but expired"
	// JWT is correctly signed but its "exp" timestamp lies in the past
	// (the previous comment here was copy-pasted from the wrong-key case)
	test.body = jwtBodyPrefix + jwtExpired
	test.wantStatusCode = 403
	t.Run("with_k8s", func(t *testing.T) {
		runTokenOAuth2HandlerTestWithK8s(t, test)
	})
}
// runTokenOAuth2HandlerTestWithK8s executes one TokenOAuth2HandlerTest case
// end to end: it wires a TokenVendor to a fake Kubernetes clientset holding a
// single device public key and a stubbed GCP IAM credentials API, posts the
// test's JWT body to the token handler, and checks status code and token
// response fields.
func runTokenOAuth2HandlerTestWithK8s(t *testing.T, test TokenOAuth2HandlerTest) {
// fake GCP IAM response for an access token
// NOTE(review): t.Fatalf here assumes the fake transport is invoked on the
// test goroutine (synchronously inside the handler call below) — confirm.
fakeIAMAPI := func(req *http.Request) *http.Response {
const wantUrl = "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/robot-service@testproject.iam.gserviceaccount.com:generateAccessToken?alt=json&prettyPrint=false"
if req.URL.String() != wantUrl {
t.Fatalf("wrong IAM URL, got %q, want %q", req.URL, wantUrl)
}
body := `{
"accessToken": "` + test.token + `",
"expireTime": "` + test.expire + `"
}`
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(strings.NewReader(body)),
Header: make(http.Header),
}
}
// setup app and http client
clientIAM := NewTestHTTPClient(fakeIAMAPI)
ts, err := tokensource.NewGCPTokenSource(context.TODO(), clientIAM, test.scopes)
if err != nil {
t.Fatal(err)
}
// Seed the fake cluster with one device key configmap ("robot-dev-testuser")
// whose pubKey matches the signing key of the test JWTs.
cs := fake.NewSimpleClientset()
if err = populateK8sEnv(cs, "default",
[]*corev1.ConfigMap{
{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "robot-dev-testuser",
},
Data: map[string]string{"pubKey": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQAB\n-----END PUBLIC KEY-----"},
},
}); err != nil {
t.Fatal(err)
}
r, err := k8s.NewK8sRepository(context.TODO(), cs, "default")
if err != nil {
t.Fatal(err)
}
tv, err := app.NewTokenVendor(context.TODO(), r, nil, ts, test.acceptedAud, saName)
if err != nil {
t.Fatal(err)
}
h := &HandlerContext{tv: tv}
// make request to the handler
rr := httptest.NewRecorder()
req := mustNewRequest(t, "POST", "/anything", io.NopCloser(strings.NewReader(test.body)))
h.tokenOAuth2Handler(rr, req)
// check response
if rr.Code != test.wantStatusCode {
t.Fatalf("wrong status code, got %d, want %d", rr.Code, test.wantStatusCode)
}
// no body is provided in case of bad requests
if test.wantStatusCode != 200 {
return
}
var resp tokensource.TokenResponse
err = json.Unmarshal(rr.Body.Bytes(), &resp)
if err != nil {
t.Fatalf("failed to unmarshal response body, got body %q", rr.Body)
}
if resp.AccessToken != test.token {
t.Fatalf("Token(..) access token: got %q, want %q", resp.AccessToken, test.token)
}
wantScopes := strings.Join(test.scopes, " ")
if resp.Scope != wantScopes {
t.Fatalf("Token(..) scopes: got %v, want %v", resp.Scope, wantScopes)
}
if resp.TokenType != "Bearer" {
t.Fatalf("Token(..) token type: got %q, want %q", resp.TokenType, "Bearer")
}
// test will fail in the year 2070
// Sanity bounds on the expiry epoch: lower bound is in the past (2017),
// upper bound is in 2047/2070 range per the comment above.
if resp.ExpiresIn < 1_507_248_000 || resp.ExpiresIn > 2_453_852_873 {
t.Fatalf("Token(..) expires in wrong, got %d, wanted far in the future, but not too far",
resp.ExpiresIn)
}
}
// Test_verifyJWTHandler exercises the JWT verification endpoint against a fake
// Kubernetes key repository holding a single device public key, covering the
// success case and the main rejection cases (missing header, bad signature,
// wrong audience, expired token).
func Test_verifyJWTHandler(t *testing.T) {
	testCases := []struct {
		name         string
		headers      map[string][]string // nil means: no Authorization header
		expectedCode int
	}{
		{
			name:         "success",
			headers:      map[string][]string{"Authorization": {jwtCorrect}},
			expectedCode: http.StatusOK,
		},
		{
			name:         "no-auth-header",
			expectedCode: http.StatusUnauthorized,
		},
		{
			name:         "wrong-sig",
			headers:      map[string][]string{"Authorization": {jwtWrongSig}},
			expectedCode: http.StatusForbidden,
		},
		{
			name:         "wrong-aud",
			headers:      map[string][]string{"Authorization": {jwtWrongAud}},
			expectedCode: http.StatusForbidden,
		},
		{
			name:         "expired",
			headers:      map[string][]string{"Authorization": {jwtExpired}},
			expectedCode: http.StatusForbidden,
		},
	}
	t.Parallel()
	cs := fake.NewSimpleClientset()
	if err := populateK8sEnv(cs, "default",
		[]*corev1.ConfigMap{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "ConfigMap",
					APIVersion: "v1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: "robot-dev-testuser",
				},
				Data: map[string]string{"pubKey": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQAB\n-----END PUBLIC KEY-----"},
			},
		}); err != nil {
		t.Fatal(err)
	}
	r, err := k8s.NewK8sRepository(context.TODO(), cs, "default")
	if err != nil {
		t.Fatal(err)
	}
	tv, err := app.NewTokenVendor(context.TODO(), r, nil, nil, "testaud", saName)
	if err != nil {
		t.Fatal(err)
	}
	h := &HandlerContext{tv: tv}
	for _, tc := range testCases {
		// Capture the loop variable: the parallel subtests below only run
		// after this loop finishes, so without the copy every subtest would
		// observe the last element of testCases (pre-Go-1.22 semantics).
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			rr := httptest.NewRecorder()
			req := mustNewRequest(t, http.MethodGet, "", nil)
			req.Header = tc.headers
			h.verifyJWTHandler(rr, req)
			// Materialize the recorded result once instead of twice.
			res := rr.Result()
			if res.StatusCode != tc.expectedCode {
				t.Errorf("verifyJWTHandler wrong status. Expected %v got %v", tc.expectedCode, res.StatusCode)
			}
		})
	}
}
================================================
FILE: src/go/cmd/token-vendor/app/BUILD.bazel
================================================
# Bazel build definitions for the token vendor "app" package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# Library containing the core token vendor business logic.
go_library(
name = "go_default_library",
srcs = ["tokenvendor.go"],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/app",
visibility = ["//visibility:public"],
deps = [
"//src/go/cmd/token-vendor/oauth:go_default_library",
"//src/go/cmd/token-vendor/oauth/jwt:go_default_library",
"//src/go/cmd/token-vendor/repository:go_default_library",
"//src/go/cmd/token-vendor/tokensource:go_default_library",
"@com_github_googlecloudrobotics_ilog//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
],
)
# Unit tests for the library above (white-box: embeds the library).
go_test(
name = "go_default_test",
srcs = ["tokenvendor_test.go"],
embed = [":go_default_library"],
deps = [
"//src/go/cmd/token-vendor/oauth:go_default_library",
"//src/go/cmd/token-vendor/repository:go_default_library",
"//src/go/cmd/token-vendor/repository/memory:go_default_library",
"//src/go/cmd/token-vendor/tokensource:go_default_library",
"@com_github_form3tech_oss_jwt_go//:go_default_library",
],
)
================================================
FILE: src/go/cmd/token-vendor/app/tokenvendor.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"log/slog"
"net/mail"
"regexp"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth/jwt"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/tokensource"
"github.com/googlecloudrobotics/ilog"
)
// TokenVendor implements the core token vendor logic: publishing and reading
// device public keys and exchanging device-signed JWTs for cloud access tokens.
type TokenVendor struct {
// repository holding the device public keys
repo repository.PubKeyRepository
// verifier used by VerifyToken for ACL checks
v *oauth.TokenVerifier
// source of GCP access tokens
ts *tokensource.GCPTokenSource
// accepted JWT audience (see acceptedAudience)
accAud string
// service account used when a device key has none configured
defaultSAName string
}
// NewTokenVendor returns a TokenVendor backed by the given public key
// repository, token verifier and token source.
//
// acceptedAudience is the JWT audience this vendor accepts and must not be
// empty. defaultSAName is the service account used for devices whose key has
// no per-key service account configured.
func NewTokenVendor(ctx context.Context, repo repository.PubKeyRepository, v *oauth.TokenVerifier, ts *tokensource.GCPTokenSource, acceptedAudience, defaultSAName string) (*TokenVendor, error) {
	if acceptedAudience == "" {
		return nil, errors.New("accepted audience must not be empty")
	}
	tv := TokenVendor{
		repo:          repo,
		v:             v,
		ts:            ts,
		accAud:        acceptedAudience,
		defaultSAName: defaultSAName,
	}
	return &tv, nil
}
// PublishPublicKey stores the public key for the given device by delegating
// to the configured key repository.
func (tv *TokenVendor) PublishPublicKey(ctx context.Context, deviceID, publicKey string) error {
slog.Info("Publishing public Key", slog.String("DeviceID", deviceID))
return tv.repo.PublishKey(ctx, deviceID, publicKey)
}
// ReadPublicKey returns the public key of the given device from the repository.
//
// NOTE(review): if the lookup returns a non-nil key, any accompanying error is
// discarded; and if both key and error are nil this returns ("", nil), so
// callers cannot distinguish "no key" from an empty key — confirm intended.
func (tv *TokenVendor) ReadPublicKey(ctx context.Context, deviceID string) (string, error) {
slog.Debug("Returning public Key", slog.String("DeviceID", deviceID))
key, err := tv.repo.LookupKey(ctx, deviceID)
if key != nil {
return key.PublicKey, nil
}
return "", err
}
// ConfigurePublicKey validates the given key options (service account fields
// must be well-formed email addresses when set) and applies them to the
// device's key in the repository.
func (tv *TokenVendor) ConfigurePublicKey(ctx context.Context, deviceID string, opts repository.KeyOptions) error {
if err := validateKeyOptions(opts); err != nil {
slog.Error("Configuring public Key", ilog.Err(err))
return err
}
slog.Debug("Configuring public Key", slog.String("DeviceID", deviceID))
return tv.repo.ConfigureKey(ctx, deviceID, opts)
}
// validateKeyOptions checks that the optional service-account fields of the
// key options, when non-empty, parse as valid email addresses. Empty fields
// are accepted as "not set".
func validateKeyOptions(opts repository.KeyOptions) error {
	checks := []struct {
		field string
		value string
	}{
		{"ServiceAccount", opts.ServiceAccount},
		{"ServiceAccountDelegate", opts.ServiceAccountDelegate},
	}
	for _, c := range checks {
		if c.value == "" {
			continue
		}
		if err := validateEmail(c.value); err != nil {
			return fmt.Errorf("%s field is not a valid email address: %v", c.field, err)
		}
	}
	return nil
}
// validateEmail returns nil if e parses as a single RFC 5322 address,
// otherwise the parse error.
func validateEmail(e string) error {
	if _, err := mail.ParseAddress(e); err != nil {
		return err
	}
	return nil
}
// Prometheus metrics for the token request path (see GetOAuth2Token), both
// labeled by "result" ("success" or "failed"; label values are pre-registered
// in init()).
var (
tokensRequested = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "tokens_requested",
Help: "Number of tokens requested",
},
[]string{"result"},
)
tokensRequestedDurations = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "tokens_requested_durations",
Help: "Time it took to retrieve a token or fail in ms",
},
[]string{"result"},
)
)
// GetOAuth2Token exchanges a device JWT for a cloud access token and records
// request count and latency metrics labeled by outcome.
func (tv *TokenVendor) GetOAuth2Token(ctx context.Context, jwtk string) (*tokensource.TokenResponse, error) {
	start := time.Now()
	resp, err := tv.getOAuth2Token(ctx, jwtk)
	state := "success"
	if err != nil {
		state = "failed"
	}
	tokensRequested.WithLabelValues(state).Inc()
	tokensRequestedDurations.WithLabelValues(state).Observe(float64(time.Since(start).Milliseconds()))
	return resp, err
}
// DeviceAuth contains authorization information for device with DeviceID.
// Information is extracted from request's OAuth2 JWT Payload
type DeviceAuth struct {
// DeviceID is the JWT issuer ("iss"), validated by IsValidDeviceID.
DeviceID string
// Key is the public key record looked up for DeviceID.
Key *repository.Key
// ServiceAcc represents IAM service account (subject) this device auth
// is requesting to impersonate. Empty value indicates request was made
// to base SA associated with this device and no impersonation is requested.
ServiceAcc string
}
// used for testing
// jwtVerifySignature is an indirection over jwt.VerifySignature so tests can
// stub out signature verification.
var jwtVerifySignature = jwt.VerifySignature
// ValidateJWT validates a device-issued JWT and returns the resulting device
// authorization information.
//
// Checks performed, in order: expiry, audience (against tv.accAud), issuer
// format (must be a valid device ID), and signature against the public key
// registered for the issuing device. If the subject claim looks like a GCP
// service account email it is returned as the impersonation target, otherwise
// it is ignored.
func (tv *TokenVendor) ValidateJWT(ctx context.Context, jwtk string) (*DeviceAuth, error) {
// Decode the payload without verifying the signature yet; the key to verify
// against is only known after reading the issuer below.
p, err := jwt.PayloadUnsafe(jwtk)
if err != nil {
return nil, errors.Wrap(err, "failed to extract JWT payload")
}
exp := time.Unix(p.Exp, 0)
if exp.Before(time.Now()) {
return nil, fmt.Errorf("JWT has expired %v, %v ago (iss: %q)",
exp, time.Since(exp), p.Iss)
}
if err := acceptedAudience(p.Aud, tv.accAud); err != nil {
return nil, errors.Wrapf(err, "validation of JWT audience failed (iss: %q)", p.Iss)
}
if !IsValidDeviceID(p.Iss) {
return nil, fmt.Errorf("missing or invalid device identifier (`iss`: %q)", p.Iss)
}
deviceID := p.Iss
k, err := tv.repo.LookupKey(ctx, deviceID)
if err != nil {
return nil, errors.Wrapf(err, "failed to retrieve public Key for device %q", deviceID)
}
if k.PublicKey == "" {
return nil, errors.Errorf("no public Key found for device %q", deviceID)
}
// Only now can the signature be verified against the device's key.
err = jwtVerifySignature(jwtk, k.PublicKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to verify signature for device %q", deviceID)
}
effectiveSub := p.Sub
if validateEmail(p.Sub) != nil || !strings.HasSuffix(p.Sub, ".iam.gserviceaccount.com") {
// To support legacy systems we are going to ignore subjects,
// which cannot represent service accounts.
effectiveSub = ""
}
return &DeviceAuth{
DeviceID: deviceID,
Key: k,
ServiceAcc: effectiveSub,
}, nil
}
// getOAuth2Token validates the device JWT and retrieves a cloud access token
// for the resolved service account. If the JWT subject requests impersonation
// of a delegate service account known for this device's key, the token is
// requested for that delegate instead, with the base account as delegate in
// the impersonation chain.
func (tv *TokenVendor) getOAuth2Token(ctx context.Context, jwtk string) (*tokensource.TokenResponse, error) {
authInfo, err := tv.ValidateJWT(ctx, jwtk)
if err != nil {
return nil, err
}
// Per-key service account wins over the vendor-wide default.
saName := authInfo.Key.SAName
delegate := ""
if saName == "" {
saName = tv.defaultSAName
}
if authInfo.ServiceAcc != "" && saName != authInfo.ServiceAcc {
// Device requested to impersonate a service account for this
// key. We are going to check if we recognize this service account.
// If no, we reject the request. If yes, we will request a token
// impersonating given account.
// There are two lines of defense here:
// 1. Device cannot request random SA, only one which TV is aware of.
// 2. GCP IAM must have given impersonation chain set, in order to return valid token.
slog.Info("Device requested to impersonate other service account", slog.String("ActAs", authInfo.ServiceAcc))
if authInfo.ServiceAcc != authInfo.Key.SADelegateName {
slog.Warn("Device requested to impersonate unknown service account", slog.String("ActAs", authInfo.ServiceAcc),
slog.String("Allowed", authInfo.Key.SADelegateName))
return nil, fmt.Errorf("device %q requested to impersonate unknown delegate %q", authInfo.DeviceID, authInfo.ServiceAcc)
}
delegate = saName
saName = authInfo.ServiceAcc
}
cloudToken, err := tv.ts.Token(ctx, saName, delegate)
if err != nil {
return nil, errors.Wrapf(err, "failed to retrieve a cloud token for device %q", authInfo.DeviceID)
}
slog.Info("Handing out cloud token", slog.String("DeviceID", authInfo.DeviceID), slog.String("ServiceAccount", saName),
slog.String("ActAs", authInfo.ServiceAcc))
return cloudToken, nil
}
// acceptedAudience validates JWT audience as defined by the token vendor.
//
// `aud` is the (space-separated) audience value from the JWT and `accAud` the
// configured accepted audience of the token vendor. The audience matches if
// any of its space-separated parts equals accAud, either bare or with the
// "?token_type=access_token" suffix appended.
func acceptedAudience(aud, accAud string) error {
	qualified := accAud + "?token_type=access_token"
	for _, candidate := range strings.Split(aud, " ") {
		if candidate == accAud || candidate == qualified {
			return nil
		}
	}
	return fmt.Errorf("audience must contain %q or %q", accAud, qualified)
}
// contains reports whether str is an element of s.
func contains(s []string, str string) bool {
	for i := range s {
		if s[i] == str {
			return true
		}
	}
	return false
}
// Prometheus metrics for the token verification path (see VerifyToken),
// labeled by ACL ("robot-service" or "human-acl") and result ("success" or
// "failed"; label values are pre-registered in init()).
var (
tokensVerified = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "tokens_verified",
Help: "Number of tokens verified",
},
[]string{"acl", "result"},
)
tokensVerifiedDurations = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "tokens_verified_durations",
Help: "Time it took to check a token in ms",
},
[]string{"acl", "result"},
)
)
// init pre-registers all known label value combinations so the metrics are
// exported with zero values before the first request arrives.
func init() {
// pre-register label values we know
for _, acl := range []string{"robot-service", "human-acl"} {
for _, result := range []string{"success", "failed"} {
tokensVerified.WithLabelValues(acl, result)
tokensVerifiedDurations.WithLabelValues(acl, result)
}
}
for _, result := range []string{"success", "failed"} {
tokensRequested.WithLabelValues(result)
tokensRequestedDurations.WithLabelValues(result)
}
}
// VerifyToken checks the given token against the robot or human ACL and
// records verification count and latency metrics labeled by ACL and outcome.
func (tv *TokenVendor) VerifyToken(ctx context.Context, token oauth.Token, robots bool) error {
	acl := "human-acl"
	if robots {
		acl = "robot-service"
	}
	slog.Debug("Verifying token", slog.String("ACL", acl))
	start := time.Now()
	err := tv.v.Verify(ctx, token, acl)
	state := "success"
	if err != nil {
		state = "failed"
	}
	tokensVerified.WithLabelValues(acl, state).Inc()
	tokensVerifiedDurations.WithLabelValues(acl, state).Observe(float64(time.Since(start).Milliseconds()))
	return err
}
// isValidDeviceIDRegex matches the RFC 1123 subdomain format.
// The device identifier is used as the name of a Kubernetes configmap, so the
// same regex as in the Kubernetes API validation is used here:
// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
// https://github.com/kubernetes/kubernetes/blob/976a940f4a4e84fe814583848f97b9aafcdb083f/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L209
var isValidDeviceIDRegex = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`).MatchString

// IsValidDeviceID validates the given identifier for string length and
// characters used: between 3 and 255 characters, matching the RFC 1123
// subdomain format above.
func IsValidDeviceID(ID string) bool {
	const minLen, maxLen = 3, 255
	if n := len(ID); n < minLen || n > maxLen {
		return false
	}
	return isValidDeviceIDRegex(ID)
}
================================================
FILE: src/go/cmd/token-vendor/app/tokenvendor_test.go
================================================
package app
import (
"bytes"
"context"
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
"github.com/form3tech-oss/jwt-go"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository/memory"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/tokensource"
)
// isValidDeviceIDTest is one table entry for TestValidateDeviceId: an input
// device identifier and the expected IsValidDeviceID result.
type isValidDeviceIDTest struct {
deviceId string
ret bool
}
// TestValidateDeviceId checks IsValidDeviceID against a table of invalid and
// valid device identifiers (length, character set, label structure).
func TestValidateDeviceId(t *testing.T) {
	cases := []isValidDeviceIDTest{
		// invalid identifiers
		{"", false},
		{`\\\\\\\\\\\\\`, false},
		{"t\nt", false},
		{"1", false},
		{"robot-dev-\ndevice", false},
		{"AAAAAAAAAAAAAAAAAAAarobot-dev-device-\neuwest1-test-com", false},
		{"TEST.com", false},
		{"TEST.com.", false},
		{"1-.test", false},
		// valid identifiers
		{"robot-dev-device", true},
		{"1test", true},
		{"1-test", true},
	}
	for _, tc := range cases {
		if got := IsValidDeviceID(tc.deviceId); got != tc.ret {
			t.Errorf("isValidDeviceID(%q), got %v, want %v", tc.deviceId, got, tc.ret)
		}
	}
}
// acceptedAudienceTest is one table entry for TestAcceptedAudience: a JWT
// audience string, the configured accepted audience, and whether the
// combination should be accepted.
type acceptedAudienceTest struct {
desc string
aud string
accAud string
wantAccepted bool
}
// TestAcceptedAudience checks acceptedAudience's matching rules: exact match,
// match within a space-separated list, the "?token_type=access_token"
// qualified form, and rejection of non-matching or concatenated audiences.
func TestAcceptedAudience(t *testing.T) {
var cases = []acceptedAudienceTest{
// accepted audiences
{"URL exact", "http://something/", "http://something/", true},
{"URL one of them", "http://anything/ http://something/", "http://something/", true},
{"URL with parameters", "http://something/?token_type=access_token", "http://something/", true},
// not accepted audiences
{"empty", "", "http://something/", false},
{"some other URL", "http://something/else", "http://something/", false},
{"URL one of them, but concat", "http://anything/http://something/", "http://something/", false},
}
for _, test := range cases {
t.Run(test.desc, func(t *testing.T) {
err := acceptedAudience(test.aud, test.accAud)
// nil error means accepted; compare against the expectation.
if (err == nil && !test.wantAccepted) || (err != nil && test.wantAccepted) {
t.Fatalf("acceptedAudience(%q, %q): got err %v, want %v",
test.aud, test.accAud, err, test.wantAccepted)
}
})
}
}
// serviceAccountNameTest describes a service-account resolution test case
// (configured SA, requested SA, expected SA or error).
// NOTE(review): this type appears unused in the visible part of this file —
// confirm whether a test using it was removed and whether this can go too.
type serviceAccountNameTest struct {
desc string
cfgSA string
reqSA string
wantSA string
wantError bool
}
// keyOptionsTest is one table entry for TestKeyOptions: key options to
// validate and whether validation is expected to fail.
type keyOptionsTest struct {
desc string
opts repository.KeyOptions
wantError bool
}
// TestKeyOptions checks validateKeyOptions: empty fields are accepted,
// well-formed email addresses are accepted, malformed ones are rejected.
func TestKeyOptions(t *testing.T) {
var cases = []keyOptionsTest{
{
desc: "empty opts are good",
opts: repository.KeyOptions{},
},
{
desc: "one good email, one empty",
opts: repository.KeyOptions{ServiceAccount: "svc@example.com"},
},
{
desc: "two good emails",
opts: repository.KeyOptions{ServiceAccount: "svc@example.com", ServiceAccountDelegate: "del@example.com"},
},
{
desc: "bad email #1",
opts: repository.KeyOptions{ServiceAccount: "svc"},
wantError: true,
},
{
desc: "bad email #2",
opts: repository.KeyOptions{ServiceAccount: "svc@"},
wantError: true,
},
}
for _, test := range cases {
t.Run(test.desc, func(t *testing.T) {
err := validateKeyOptions(test.opts)
haveError := err != nil
if haveError != test.wantError {
if test.wantError {
t.Fatalf("validateKeyOptions returned %v, but wanted error", err)
} else {
t.Fatalf("validateKeyOptions returned %v, but wanted no error", err)
}
}
})
}
}
// TestTokenVendor_ValidateJWT exercises ValidateJWT's handling of the JWT
// subject claim: subjects that look like GCP service account emails are passed
// through as the impersonation target, everything else (empty, plain strings,
// malformed or non-GCP identifiers) is ignored for legacy compatibility.
// Signature verification is stubbed out for these cases.
func TestTokenVendor_ValidateJWT(t *testing.T) {
	type fields struct {
		repo          repository.PubKeyRepository
		v             *oauth.TokenVerifier
		ts            *tokensource.GCPTokenSource
		accAud        string
		defaultSAName string
	}
	const deviceId = "test-device-id"
	devicePubKey := &repository.Key{
		PublicKey: "test-device-public-key",
	}
	defaultFields := fields{
		repo:          getInMemoryRepo(deviceId, devicePubKey.PublicKey),
		v:             nil,
		ts:            nil,
		accAud:        "",
		defaultSAName: "robot-service@testing.iam.gserviceaccount.com",
	}
	type args struct {
		jwtk string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    *DeviceAuth
		wantErr bool
	}{
		{
			name:   "sa-success",
			fields: defaultFields,
			args: args{
				jwtk: createFakeJWTWithSubject(t, deviceId, createSAFromName("robot-service")),
			},
			want: &DeviceAuth{
				DeviceID:   deviceId,
				Key:        devicePubKey,
				ServiceAcc: createSAFromName("robot-service"),
			},
			wantErr: false,
		},
		{
			name:   "jwt-subject-empty",
			fields: defaultFields,
			args: args{
				jwtk: createFakeJWTWithSubject(t, deviceId, ""),
			},
			want: &DeviceAuth{
				DeviceID:   deviceId,
				Key:        devicePubKey,
				ServiceAcc: "",
			},
			wantErr: false,
		},
		{
			name:   "jwt-subject-not-an-SA",
			fields: defaultFields,
			args: args{
				jwtk: createFakeJWTWithSubject(t, deviceId, "robot-rock"),
			},
			want: &DeviceAuth{
				DeviceID:   deviceId,
				Key:        devicePubKey,
				ServiceAcc: "",
			},
			wantErr: false,
		},
		{
			name:   "jwt-subject-not-gcp-SA",
			fields: defaultFields,
			args: args{
				jwtk: createFakeJWTWithSubject(t, deviceId, "robot-rock@iam.gserviceaccount.com"),
			},
			want: &DeviceAuth{
				DeviceID:   deviceId,
				Key:        devicePubKey,
				ServiceAcc: "",
			},
			wantErr: false,
		},
		{
			name:   "jwt-subject-shall-not-pass",
			fields: defaultFields,
			args: args{
				jwtk: createFakeJWTWithSubject(t, deviceId, "robot-rock@.iam.gserviceaccount.com"),
			},
			want: &DeviceAuth{
				DeviceID:   deviceId,
				Key:        devicePubKey,
				ServiceAcc: "",
			},
			wantErr: false,
		},
		{
			name:   "jwt-subject-not-really-SA",
			fields: defaultFields,
			args: args{
				jwtk: createFakeJWTWithSubject(t, deviceId, "arn:aws:iam::123456789012:role/my-role"),
			},
			want: &DeviceAuth{
				DeviceID:   deviceId,
				Key:        devicePubKey,
				ServiceAcc: "",
			},
			wantErr: false,
		},
	}
	// Stub out signature verification for these cases and restore the original
	// afterwards; previously the stub leaked into all later tests in the
	// package because the global was never reset.
	origVerify := jwtVerifySignature
	jwtVerifySignature = func(jwtk string, pubKey string) error {
		// Tests do not validate signature
		return nil
	}
	t.Cleanup(func() { jwtVerifySignature = origVerify })
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tv := &TokenVendor{
				repo:          tt.fields.repo,
				v:             tt.fields.v,
				ts:            tt.fields.ts,
				accAud:        tt.fields.accAud,
				defaultSAName: tt.fields.defaultSAName,
			}
			got, err := tv.ValidateJWT(context.Background(), tt.args.jwtk)
			if (err != nil) != tt.wantErr {
				t.Errorf("ValidateJWT() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ValidateJWT() got = %v, want %v", got, tt.want)
			}
		})
	}
}
// getInMemoryRepo returns an in-memory key repository pre-populated with one
// device key, for use as a test fixture.
// NOTE(review): construction and publish errors are deliberately discarded;
// a failure here surfaces later as a missing key in the test using the repo.
func getInMemoryRepo(deviceId, key string) repository.PubKeyRepository {
ctx := context.Background()
repo, _ := memory.NewMemoryRepository(ctx)
_ = repo.PublishKey(ctx, deviceId, key)
return repo
}
// createFakeJWTWithSubject builds an unsigned three-segment JWT whose issuer
// is deviceId and whose subject is the given value; the signature segment is a
// fixed placeholder, so it only works where signature verification is stubbed.
func createFakeJWTWithSubject(t *testing.T, deviceId, subject string) string {
jwtWithSubject := jwt.StandardClaims{
Audience: nil,
// valid for 10 seconds from now, enough for the test run
ExpiresAt: time.Now().Add(10 * time.Second).Unix(),
Id: t.Name(),
IssuedAt: 0,
Issuer: deviceId,
NotBefore: 0,
Subject: subject,
}
buffer := new(bytes.Buffer)
// NOTE(review): json.Encoder appends a trailing newline to the payload
// bytes; harmless here since the payload is only decoded, not compared.
if err := json.NewEncoder(buffer).Encode(&jwtWithSubject); err != nil {
t.Errorf("failed to serialize jwt: %v", err)
}
return jwt.EncodeSegment([]byte(`{"typ": "JWT"}`)) +
"." + jwt.EncodeSegment(buffer.Bytes()) +
"." + jwt.EncodeSegment([]byte("no-signature-present"))
}
// createSAFromName returns the GCP service account email for the given account
// name in the fixed test project "test-project".
func createSAFromName(name string) string {
	const testProject = "test-project"
	return fmt.Sprintf("%s@%s.iam.gserviceaccount.com", name, testProject)
}
================================================
FILE: src/go/cmd/token-vendor/main.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"net/http"
"os"
"path"
"strings"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/api"
apiv1 "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/api/v1"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/app"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository/k8s"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository/memory"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/tokensource"
"github.com/googlecloudrobotics/ilog"
)
// scopeFlags collects repeated -scope command line flags into a string slice;
// it implements the flag.Value interface.
type scopeFlags []string
// String returns the collected scopes joined by comma (used by the flag
// package, e.g. for help output).
func (i *scopeFlags) String() string {
return strings.Join(*i, ",")
}
// Set appends one scope per occurrence of the flag; it never fails.
func (i *scopeFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
// KeyStoreOpt names a public key repository backend implementation.
type KeyStoreOpt string
const (
// NOTE(review): these are untyped string constants, not KeyStoreOpt values;
// they are compared directly against the -key-store flag string in main.
Kubernetes = "KUBERNETES"
Memory = "IN_MEMORY"
)
// Supported public key backends.
var keyStoreOpts = []string{string(Kubernetes), string(Memory)}
// Command line flags for the token vendor binary, grouped by concern.
var (
	// Logging options
	verbose  = flag.Bool("verbose", false, "DEPRECATED: Use log_level") // fixed "DEPRECTAED" typo in help text
	logLevel = flag.Int("log-level", int(slog.LevelInfo), "the log message level required to be logged")
	// Backend options
	keyStore = flag.String(
		"key-store",
		string(Kubernetes),
		"Public key repository implementation to use. Options: "+strings.Join(keyStoreOpts, ","))
	k8sQPS   = flag.Int("k8s-qps", 25, "Limit of QPS to the Kubernetes API server.")
	k8sBurst = flag.Int("k8s-burst", 50, "Burst limit of QPS to the Kubernetes API server.")
	// API options
	bind     = flag.String("bind", "0.0.0.0", "Address to bind to")
	port     = flag.Int("port", 9090, "Port number to listen on")
	basePath = flag.String("base",
		"/apis/core.token-vendor",
		"Base path where the API will be mounted to.")
	// GCP Cloud options
	project = flag.String("project", "", "The cloud project")
	// Kubernetes backend options
	namespace = flag.String("namespace", "default",
		"The namespace where to store the device keys. (Kubernetes)")
	// Authentication / JWT options
	acceptedAudience = flag.String("accepted_audience",
		"", "Endpoint URL of the token vendor. Used for verification of JWTs sent by robots.") // fixed "send" -> "sent"
	// scopes is registered via flag.Var in main because it is a repeated flag.
	scopes    = scopeFlags{}
	robotName = flag.String("service_account", "robot-service",
		"Name of the service account to generate cloud access tokens for (unless specified per on-prem robot).")
)
// main wires up the token vendor: logging, the selected key store backend,
// token verifier and token source, registers the API endpoints and serves.
// Any setup failure is logged and terminates the process.
func main() {
flag.Var(&scopes, "scope", "GCP scopes included in the token given out to robots.")
flag.Parse()
// configure logging; the deprecated -verbose flag forces debug level
ll := slog.Level(*logLevel)
if *verbose {
ll = slog.LevelDebug
}
logHandler := ilog.NewLogHandler(ll, os.Stderr)
slog.SetDefault(slog.New(logHandler))
// init components
ctx := context.Background()
var rep repository.PubKeyRepository
var err error
if *keyStore == Kubernetes {
// in-cluster config: this binary must run inside the cluster
config, err := rest.InClusterConfig()
if err != nil {
slog.Error("Failed to get config", ilog.Err(err))
os.Exit(1)
}
config.QPS = float32(*k8sQPS)
config.Burst = *k8sBurst
cs, err := kubernetes.NewForConfig(config)
if err != nil {
slog.Error("Failed to make clientset", ilog.Err(err))
os.Exit(1)
}
rep, err = k8s.NewK8sRepository(ctx, cs, *namespace)
if err != nil {
slog.Error("Failed to make k8s repository client", ilog.Err(err))
os.Exit(1)
}
} else if *keyStore == Memory {
rep, err = memory.NewMemoryRepository(ctx)
if err != nil {
slog.Error("Failed to make in-memory repository", ilog.Err(err))
os.Exit(1)
}
} else {
slog.Error("unsupported key store option", slog.String("Value", *keyStore))
os.Exit(1)
}
slog.Info("Set up key store", slog.String("KeyStore", *keyStore))
verifier, err := oauth.NewTokenVerifier(ctx, &http.Client{}, *project)
if err != nil {
slog.Error("Failed to make verifier", ilog.Err(err))
os.Exit(1)
}
ts, err := tokensource.NewGCPTokenSource(ctx, nil, scopes)
if err != nil {
slog.Error("Failed to make token source", ilog.Err(err))
os.Exit(1)
}
// default service account email derived from -service_account and -project
saName := fmt.Sprintf("%s@%s.iam.gserviceaccount.com", *robotName, *project)
tv, err := app.NewTokenVendor(ctx, rep, verifier, ts, *acceptedAudience, saName)
if err != nil {
slog.Error("Failed to make token vendor", ilog.Err(err))
os.Exit(1)
}
// register API endpoints
if err := api.Register(); err != nil {
slog.Error("Failed to register root endpoints", ilog.Err(err))
os.Exit(1)
}
if err := apiv1.Register(tv, path.Join(*basePath, "v1")); err != nil {
slog.Error("Failed to register v1 endpoints", ilog.Err(err))
os.Exit(1)
}
// serve API
addr := fmt.Sprintf("%s:%d", *bind, *port)
err = api.SetupAndServe(addr)
if err != nil {
slog.Error("Failed to listen", slog.String("IP", addr), ilog.Err(err))
os.Exit(1)
}
}
================================================
FILE: src/go/cmd/token-vendor/oauth/BUILD.bazel
================================================
# Bazel build definitions for the token vendor "oauth" package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# Library with the token verifier and its token cache.
go_library(
name = "go_default_library",
srcs = [
"cache.go",
"verifier.go",
],
importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth",
visibility = ["//visibility:public"],
deps = [
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_api//iam/v1:go_default_library",
"@org_golang_google_api//option:go_default_library",
],
)
# Unit tests for the token cache.
go_test(
name = "go_default_test",
srcs = ["cache_test.go"],
embed = [":go_default_library"],
)
================================================
FILE: src/go/cmd/token-vendor/oauth/cache.go
================================================
package oauth
import (
"container/list"
"fmt"
"sync"
"time"
)
// entry is a single element of the token cache.
type entry struct {
// when the cache entry expires
expires time.Time
// the cache key
key string
// if the key has actAs permission
actAs bool
}
// tokenCache is a fixed-size cache with per-entry TTL, mapping
// (service account + token) keys to an actAs permission bit.
type tokenCache struct {
// hashmap for quick key lookup
cache map[string]*list.Element
// list of entries sorted by expire time, oldest in front
evictOrder *list.List
// mutex to serialize cache access
mu sync.Mutex
// expire an entry after
expire time.Duration
// maximum number of cache entries to keep
size int
}
// newTokenCache creates a simple cache for the verifier logic with a maximum
// size and a per-entry time-to-live.
//
// Eviction is done lazily during add. Existing entries are refreshed on add,
// but not on access. size must be at least 1 and expire must be positive.
func newTokenCache(size int, expire time.Duration) (*tokenCache, error) {
	switch {
	case size < 1:
		return nil, fmt.Errorf("invalid cache size %d", size)
	case expire <= 0:
		return nil, fmt.Errorf("expire must be positive, got %v", expire)
	}
	c := &tokenCache{
		cache:      make(map[string]*list.Element, size),
		evictOrder: list.New(),
		expire:     expire,
		size:       size,
	}
	return c, nil
}
// add if a given token has `actAs` permission on a given service account.
//
// If the entry exists already, refresh it. Thread-safe.
func (c *tokenCache) add(token Token, sa string, actAs bool) {
c.mu.Lock()
defer c.mu.Unlock()
// evict all expired entries
c.evictExpired()
// cache key is the concatenation of service account and token
key := sa + string(token)
expires := time.Now().Add(c.expire)
// for existing keys update the entry
if el, found := c.cache[key]; found {
e := el.Value.(*entry)
e.actAs = actAs
e.expires = expires
// refreshed entry becomes the newest: move to the back of the
// eviction order
c.evictOrder.MoveToBack(el)
return
}
// if we reached capacity, evict oldest element
if len(c.cache) == c.size {
c.evictOldest()
}
// create a new cache entry
e := &entry{expires: expires, key: key, actAs: actAs}
el := c.evictOrder.PushBack(e)
c.cache[key] = el
}
// Evict all expired keys (not thread-safe).
//
// Entries are ordered oldest-first, so eviction stops at the first entry that
// has not expired yet.
func (c *tokenCache) evictExpired() {
	for {
		front := c.evictOrder.Front()
		if front == nil {
			return
		}
		e := front.Value.(*entry)
		if e.expires.After(time.Now()) {
			return
		}
		c.evictOrder.Remove(front)
		delete(c.cache, e.key)
	}
}
// Evict oldest cache entry (not thread-safe). No-op on an empty cache.
func (c *tokenCache) evictOldest() {
	oldest := c.evictOrder.Front()
	if oldest == nil {
		return
	}
	e := oldest.Value.(*entry)
	c.evictOrder.Remove(oldest)
	delete(c.cache, e.key)
}
// actAs queries the cache if a token has actAs permission on a service account.
//
// Returns (actAs, found). found is false when the token+sa pair is not cached
// or when its entry has already expired. Thread-safe.
func (c *tokenCache) actAs(token Token, sa string) (bool, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	el, found := c.cache[sa+string(token)]
	if !found {
		return false, false
	}
	e := el.Value.(*entry)
	if e.expires.Before(time.Now()) {
		// present but expired: report a miss
		return false, false
	}
	return e.actAs, true
}
================================================
FILE: src/go/cmd/token-vendor/oauth/cache_test.go
================================================
package oauth
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
)
// tokenProp bundles a test token with the ACL name it is stored under and
// the actAs value recorded for it.
type tokenProp struct {
	token Token
	acl   string
	actAs bool
}

// counter provides process-wide unique values for randToken; guarded by
// counterMu so parallel test workers never produce duplicate tokens.
var (
	counter   int
	counterMu sync.Mutex
)
// randToken returns a tokenProp with a unique token/acl pair and a random
// actAs value. Safe to call from concurrent test workers.
func randToken() *tokenProp {
	counterMu.Lock()
	defer counterMu.Unlock()
	counter++
	id := fmt.Sprintf("%X", counter)
	return &tokenProp{
		token: Token(id),
		acl:   id,
		actAs: rand.Intn(2) == 1,
	}
}
// TestTokenCacheOneTokenMultipleAcl verifies that the same token can carry
// independent actAs decisions for different ACLs.
func TestTokenCacheOneTokenMultipleAcl(t *testing.T) {
	tc, err := newTokenCache(100, time.Hour)
	if err != nil {
		t.Fatal(err.Error())
	}
	tc.add("a", "acl_1", true)
	tc.add("a", "acl_2", false)
	actAs, found := tc.actAs("a", "acl_1")
	if found == false || actAs != true {
		t.Fatalf("got (%v, %v), want (true, true)", actAs, found)
	}
	actAs, found = tc.actAs("a", "acl_2")
	if found == false || actAs != false {
		// Fixed message: values print as (actAs, found), so the wanted
		// pair is (false, true), not (true, false).
		t.Fatalf("got (%v, %v), want (false, true)", actAs, found)
	}
}
// TestTokenCacheExpire checks that entries with a negative lifetime are
// reported as not found.
func TestTokenCacheExpire(t *testing.T) {
	tc, err := newTokenCache(100, time.Hour)
	if err != nil {
		t.Fatal(err.Error())
	}
	// Flip the lifetime so every entry is expired on arrival.
	tc.expire = -tc.expire
	tc.add("a", "acl_1", true)
	if _, found := tc.actAs("a", "acl_1"); found {
		t.Fatalf("got key a, want not found")
	}
}
// TestEvictExpired checks that evictExpired drops every expired entry from
// both the lookup map and the eviction list.
func TestEvictExpired(t *testing.T) {
	tc, err := newTokenCache(100, time.Hour)
	if err != nil {
		t.Fatal(err.Error())
	}
	tc.expire *= -1 // entries expire instantly
	tc.add("a", "acl_1", true)
	tc.add("b", "acl_1", true)
	tc.add("c", "acl_1", true)
	tc.evictExpired()
	if len(tc.cache) != 0 || tc.evictOrder.Len() != 0 {
		// Fixed message: removed the stray ")" from the format string.
		t.Fatalf("got cache size %d and %d, want 0",
			len(tc.cache), tc.evictOrder.Len())
	}
}
// TestEvictOldest checks that evictOldest removes exactly the least
// recently added entry.
func TestEvictOldest(t *testing.T) {
	tc, err := newTokenCache(100, time.Hour)
	if err != nil {
		t.Fatal(err.Error())
	}
	tc.add("a", "acl_1", true)
	tc.add("b", "acl_1", true)
	tc.add("c", "acl_1", true)
	tc.evictOldest()
	if len(tc.cache) != 2 || tc.evictOrder.Len() != 2 {
		// Fixed message: removed the duplicated "got".
		t.Fatalf("got cache size %d and %d after eviction, want 2",
			len(tc.cache), tc.evictOrder.Len())
	}
	// Cache keys are built as acl+token, so "acl_1b" is the oldest entry now.
	e := tc.evictOrder.Front().Value.(*entry)
	if e.key != "acl_1b" {
		t.Fatalf("got oldest element %v after eviction, want b", e)
	}
}
// Similar to test case TestEvictOldest, but without explicit eviction:
// adding beyond capacity must evict the oldest entry automatically.
func TestEvictionGeneral(t *testing.T) {
	tc, err := newTokenCache(2, time.Hour)
	if err != nil {
		t.Fatal(err.Error())
	}
	tc.add("a", "acl_1", true)
	tc.add("b", "acl_1", true)
	tc.add("c", "acl_1", true)
	if len(tc.cache) != 2 || tc.evictOrder.Len() != 2 {
		// Fixed message: removed the duplicated "got".
		t.Fatalf("got cache size %d and %d after eviction, want 2",
			len(tc.cache), tc.evictOrder.Len())
	}
	// Cache keys are built as acl+token, so "acl_1b" is the oldest entry.
	e := tc.evictOrder.Front().Value.(*entry)
	if e.key != "acl_1b" {
		t.Fatalf("got oldest element %v after eviction, want b", e)
	}
}
// TestParallelAccessNoEviction exercises concurrent adds and lookups with a
// cache sized so that no eviction can happen.
func TestParallelAccessNoEviction(t *testing.T) {
	const workers = 3
	const entries = 10000
	tc, err := newTokenCache(workers*entries, time.Hour)
	if err != nil {
		t.Fatal(err.Error())
	}
	errs := make(chan error, workers)
	var wg sync.WaitGroup
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if werr := accessWorker(tc, entries, true); werr != nil {
				errs <- werr
			}
		}()
	}
	wg.Wait()
	if len(errs) > 0 {
		if err = <-errs; err != nil {
			t.Fatal(err.Error())
		}
	}
}
// accessWorker adds `adds` unique entries to the cache and then verifies
// each of them. With failIfNotFound set, a missing entry is an error;
// otherwise missing entries are skipped (useful when eviction is expected).
func accessWorker(tc *tokenCache, adds int, failIfNotFound bool) error {
	props := make([]*tokenProp, 0, adds)
	for i := 0; i < adds; i++ {
		p := randToken()
		props = append(props, p)
		tc.add(p.token, p.acl, p.actAs)
	}
	// Read everything back and compare against what was stored.
	for idx, p := range props {
		actAs, found := tc.actAs(p.token, p.acl)
		switch {
		case !found && failIfNotFound:
			return fmt.Errorf("token %d: entry %v should be there, but is not", idx, p)
		case !found:
			continue
		case actAs != p.actAs:
			return fmt.Errorf("actAs got %v, want %v", actAs, p.actAs)
		}
	}
	return nil
}
================================================
FILE: src/go/cmd/token-vendor/oauth/jwt/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# Library target for the JWT helper package (decode + RSA verification).
go_library(
    name = "go_default_library",
    srcs = ["jwt.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/oauth/jwt",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_form3tech_oss_jwt_go//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

# Unit tests for the JWT helper package.
go_test(
    name = "go_default_test",
    srcs = ["jwt_test.go"],
    embed = [":go_default_library"],
    deps = ["@com_github_google_go_cmp//cmp:go_default_library"],
)
================================================
FILE: src/go/cmd/token-vendor/oauth/jwt/jwt.go
================================================
package jwt
import (
"encoding/base64"
"encoding/json"
"strings"
jwt "github.com/form3tech-oss/jwt-go"
"github.com/pkg/errors"
)
// payload models the JWT payload (claims) section for JSON decoding.
// encoding/json matches the exported field names against the lowercase
// claim names case-insensitively.
type payload struct {
	Aud    string // audience
	Iss    string // issuer
	Exp    int64  // expiry; presumably a unix timestamp in seconds — TODO confirm
	Sub    string // subject
	Scopes string
	Claims string
}
// PayloadUnsafe returns the decoded, unverified payload section of a JWT.
//
// Unsafe because the returned claims cannot be trusted unless the token's
// signature is verified separately (see VerifySignature).
func PayloadUnsafe(jwtk string) (*payload, error) {
	segments := strings.Split(jwtk, ".")
	// A compact JWT is always header.payload.signature.
	if len(segments) != 3 {
		return nil, errors.New("invalid JWT, token must have 3 parts")
	}
	decoded, err := base64.RawURLEncoding.DecodeString(segments[1])
	if err != nil {
		return nil, errors.Wrap(err, "failed to decode JWT payload section")
	}
	var p payload
	if err := json.Unmarshal(decoded, &p); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal JWT payload section")
	}
	return &p, nil
}
// VerifySignature verifies the given encoded JWT with the RSA public key in
// PEM format.
//
// Only RSA-family signing methods are accepted; tokens signed with any
// other algorithm are rejected before the key is used. NOTE(review):
// jwt.Parse presumably also validates registered claims such as "exp" —
// confirm against the form3tech-oss/jwt-go documentation.
func VerifySignature(jwtk string, pubKey string) error {
	key, err := jwt.ParseRSAPublicKeyFromPEM([]byte(pubKey))
	if err != nil {
		return errors.Wrap(err, "failed to parse public key")
	}
	// The keyfunc rejects non-RSA algorithms before the signature check to
	// avoid algorithm-confusion (e.g. HS256 using the public key as HMAC
	// secret).
	_, err = jwt.Parse(jwtk, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodRSA); !ok {
			return nil, errors.New("unexpected signing method, only RSA family is accepted")
		}
		return key, nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to parse and verify signature")
	}
	return nil
}
================================================
FILE: src/go/cmd/token-vendor/oauth/jwt/jwt_test.go
================================================
package jwt
import (
"testing"
"github.com/google/go-cmp/cmp"
)
/*
# Valid token
Header:
{
"alg": "RS256",
"typ": "JWT"
}
Payload:
{
"aud": "testaud",
"iss": "robot-dev-testuser",
"exp": 1913373010,
"scopes": "testscopes",
"claims": "testclaims"
}
Signed using `testPrivKey`
*/
// testPayload is the decoded payload section of testJWT (see the comment
// above for the full header and payload JSON).
var testPayload = payload{
	Aud:    "testaud",
	Iss:    "robot-dev-testuser",
	Exp:    1913373010,
	Scopes: "testscopes",
	Claims: "testclaims",
}

// testJWT is an RS256-signed token carrying testPayload.
const testJWT = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJ0ZXN0YXVkIiwiaXNzIjoicm9ib3QtZGV2LXRlc3R1c2VyIiwiZXhwIjoxOTEzMzczMDEwLCJzY29wZXMiOiJ0ZXN0c2NvcGVzIiwiY2xhaW1zIjoidGVzdGNsYWltcyJ9.WJP0shiqynW9ZrmV4k78W3_nn_YA86XLK58IJYyqUF-8LAG92MraNqVqD0t6i-s90VBL64hCXlsA7zP3WlsMHOEvXCyRkGffhbJNIlJqIVTVfGvyF-ZmuaAr352n5kmKTrfTRi7h9LWTcvDgSosN438J8Jy9BT1FE9P-BHfyBUegZ15DWFAiAhz0r_Fgj7hAMXUnRdZfj3_dE0Nhi5IGs3L-0XzU-dE150ZJvtGMdIjc_QCqYHV3wtSgETKDYQoonD08n6g5GqC8nNkqrWFMttafLdPaDAsr8KWtj1dD1w9sw1YJClEzF9JOc63WNPZf8CgdU2enFW-V-2vHbUaekg"

// testPubKey is the RSA public key matching the private key that signed
// testJWT.
const testPubKey = `
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C
2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sM
gyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxX
vGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmz
Q9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard
17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xD
pwIDAQAB
-----END PUBLIC KEY-----`
/*
Unused in code right now, but kept here for reference:
const testPrivKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3
OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/
yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pk
dXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22
aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsF
eZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQABAoH/bKMLrT/W4/wT+6PN
KU3FVbWDompywyssqlZ31Q6g9pdCCTIyw0jemlG0ewtdk3yIu8WS0Aku36NudWtP
pvDBPo+CZILRYS9N0AUNXBPl7sUA4OzVdCBnk5FTF1daV7N5CA+ZDXuDVa91fduJ
1ElSF9+weCKph0170Rsc74G570Q1ypoee/gdhkwwK5aYfTs+Z6fpaEnHaPzcwYkF
4QTsCshtoGZslmgZt8Tm7sfDDFWD20fmr1s350Ne1I7VYRFiyGbQI+IB+4pc9LSX
8CHcHIzHidKYTSG6YwpDsNRN/BkQklhsuLnNacMFFddO0IHIS0GlLBJbCRkN3b/n
/XC5AoGBAPZIN3VCpSEAw6OsM1zL4CBcq2dOb5b87rAeUmSkmW415fuyUNJJBcaf
1pliCQNeg9RzRDuHOs6BTU9i+fLcbOwSapFzGxzqnv4xmkHbj1Xs52Z+97HvKKld
xlQ/TF72WGITZVwmQWxJ9Rgx+bi7OirzOtQYoNpFoF5vHgyGrUZ7AoGBAMSosXUk
uLMzrZjH4Oetp8tq9Udyk7Xkk7booU7I0iPb/Dvadsuc9WZI+LP4R3iWmtLcJOUr
WyfliCLvbWtF4aW2vo7hvffe19krg/H26WEuBTuQGCZv8B5o8xHSecb7jbrKt9g6
r8I5kr+2tAZKLC6mtFdJgfSXNO9tveBxe+XFAoGBAIwQljnCJVeXr6wuCygDavv8
uB6QpTYhsz3GgOVsFzZuwNVcnEp77SUBUnL5JlccMa1pwKx6RB+dufIkQDK22duI
vcLqy8iuRq4aV7iMvgAIM7I/E2/GrEFma50OQsjfIXTlwwedWifUB+gyw+sjz/kN
S6/EMfbxEjuixlwpW/JxAoGBAKG5dM44F6hPPFijL0J3XcD8QZ+zCuQPiKZnopgO
sDmLJF/4Za9Gccze/5/I8sWpXMNBBRptUDZ8HTtVmK8aNdm4cfdAj5/y46EVlxl6
Cyy+0tDLzAB4F4h6mEI0y66mmkRdh1jL0lQwUo1Ua7Gsd68Zqr8JlVSWsJKhtf+I
c/JdAoGAFCSDby7ByX0W23Su3R28+9lWRSmNG79kLRLzlXsCwXTUTFh/TjAaEKgK
vwi8dtCSMNnJLCUXGx5cjTndgjTl8Woah0wy9XNNeIUjI8JPxIwXmmjppPKdCBI4
0ZyqQjgPJvwfY7lxFjE10ypv99QDlEbnwngt6bvSkY+6+DQTUDw=
-----END RSA PRIVATE KEY-----
`
*/
// TestVerifySignature checks that a correctly signed token verifies.
func TestVerifySignature(t *testing.T) {
	err := VerifySignature(testJWT, testPubKey)
	if err != nil {
		// Fixed typo in the failure message ("verifiy" -> "verify").
		t.Fatalf("VerifySignature(..): valid JWT failed to verify, got: %v", err)
	}
}
// TestPayloadUnsafe checks that the decoded payload matches the known
// claims of the test token.
func TestPayloadUnsafe(t *testing.T) {
	p, err := PayloadUnsafe(testJWT)
	if err != nil {
		t.Fatalf("PayloadUnsafe(..): valid payload failed, got %v", err)
	}
	diff := cmp.Diff(p, &testPayload)
	if diff != "" {
		t.Fatalf("PayloadUnsafe(..): got %+v, want %+v, diff: %v", p, testPayload, diff)
	}
}
// TestPayloadUnsafeInvalidPayload checks that corrupted payload data is
// rejected with an error.
func TestPayloadUnsafeInvalidPayload(t *testing.T) {
	// Corrupt the payload segment by injecting a junk character.
	invalidJWT := testJWT[:80] + "Z" + testJWT[80:]
	if p, err := PayloadUnsafe(invalidJWT); err == nil {
		t.Fatalf("PayloadUnsafe(..) should have errored for invalid input, got payload %+v", p)
	}
}
// TestVerifySignatureInvalidSig checks that a corrupted signature segment
// fails verification.
func TestVerifySignatureInvalidSig(t *testing.T) {
	// Corrupt the signature segment by injecting a junk character.
	invalidSig := testJWT[:230] + "Z" + testJWT[230:]
	if err := VerifySignature(invalidSig, testPubKey); err == nil {
		t.Fatal("VerifySignature(..): should have errored with invalid signature, but did not")
	}
}
/*
# Invalid signature algorithm

The following token uses the `HS256` algorithm with the secret `somesecret`.
*/
const testJWTInvalidSigAlg = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJ0ZXN0YXVkIiwiaXNzIjoicm9ib3QtZGV2LXRlc3R1c2VyIiwiZXhwIjoxOTEzMzczMDEwLCJzY29wZXMiOiJ0ZXN0c2NvcGVzIiwiY2xhaW1zIjoidGVzdGNsYWltcyJ9.dXmTXpf3gS12z-Jkkw3ZTttvCxymqh03iCRd77DZCjE"

// TestVerifySignatureInvalidSigAlg checks that a token signed with a
// non-RSA algorithm is rejected even though it is otherwise well-formed.
func TestVerifySignatureInvalidSigAlg(t *testing.T) {
	// we use the public key as second parameter because we know it does parse the
	// key first before checking the signature algorithm
	err := VerifySignature(testJWTInvalidSigAlg, testPubKey)
	if err == nil { // no error
		t.Fatal("VerifySignature(..): should have errored with invalid signature alg, but did not")
	}
}
================================================
FILE: src/go/cmd/token-vendor/oauth/verifier.go
================================================
package oauth
import (
"context"
"fmt"
"net/http"
"time"
"github.com/pkg/errors"
"google.golang.org/api/iam/v1"
"google.golang.org/api/option"
)
// TokenVerifier checks access tokens against GCP IAM and caches the results.
type TokenVerifier struct {
	s       *iam.Service // GCP IAM API client
	project string       // cloud project hosting the service accounts
	cache   *tokenCache  // caches actAs decisions per token + service account
}

// Token as string alias emphasizes that this is a secret.
type Token string

// Sizing and entry lifetime of the verification result cache.
const (
	cacheSize   = 1000
	cacheExpire = 5 * time.Minute
)
// NewTokenVerifier returns a new TokenVerifier instance for a cloud project.
func NewTokenVerifier(ctx context.Context, c *http.Client, project string) (*TokenVerifier, error) {
	svc, err := iam.NewService(ctx, option.WithHTTPClient(c))
	if err != nil {
		return nil, errors.Wrap(err, "failed to create service client")
	}
	resultCache, err := newTokenCache(cacheSize, cacheExpire)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create token cache")
	}
	return &TokenVerifier{s: svc, project: project, cache: resultCache}, nil
}
// Verify checks that the token has "actAs" permission on the given service
// account, consulting the local cache before querying GCP IAM.
func (v *TokenVerifier) Verify(ctx context.Context, token Token, sa string) error {
	const iamActAs = "iam.serviceAccounts.actAs"
	resource := fmt.Sprintf("projects/%s/serviceAccounts/%s@%s.iam.gserviceaccount.com", v.project, sa, v.project)
	// A cached decision short-circuits the IAM round trip.
	if actAs, found := v.cache.actAs(token, resource); found {
		if actAs {
			return nil
		}
		return fmt.Errorf("token is missing permission %q for resource %q (cached)",
			iamActAs, resource)
	}
	// Cache miss: ask IAM and record the outcome.
	start := time.Now()
	resp, err := doTestIamPermissions(ctx, v.s, string(token), resource, []string{iamActAs})
	if err != nil {
		return errors.Wrapf(err, "TestIamPermissions failed for resource %q with permission %q after %.3fs",
			resource, iamActAs, time.Since(start).Seconds())
	}
	if !contains(resp.Permissions, iamActAs) {
		v.cache.add(token, resource, false)
		return fmt.Errorf("token is missing permission %q for resource %q", iamActAs, resource)
	}
	v.cache.add(token, resource, true)
	return nil
}
// doTestIamPermissions retry parameters.
const (
	// timeout bounds each individual TestIamPermissions attempt.
	timeout = time.Second * 5
	// retryInterval is the static pause between attempts.
	retryInterval = time.Second * 1
	// retries is the number of additional attempts after the first one.
	retries = 2
)
// doTestIamPermissions calls the IAM TestIamPermissions API for `resource`,
// authorized with `token`, and returns the API response.
//
// Each attempt is bounded by `timeout`. Attempts failing with
// context.DeadlineExceeded are retried up to `retries` more times with a
// static `retryInterval` pause; any other error aborts immediately.
func doTestIamPermissions(ctx context.Context, s *iam.Service, token, resource string, permissions []string) (*iam.TestIamPermissionsResponse, error) {
	preq := iam.TestIamPermissionsRequest{Permissions: permissions}
	pcall := s.Projects.ServiceAccounts.TestIamPermissions(resource, &preq)
	pcall.Header().Set("Authorization", "Bearer "+string(token))
	var lastErr error
	for i := 0; i <= retries; i++ {
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		// Bound this attempt with its own deadline and release the timer
		// right after (the old code deferred all cancels to function exit).
		attemptCtx, cancel := context.WithTimeout(ctx, timeout)
		r, err := pcall.Context(attemptCtx).Do()
		cancel()
		if err == nil { // no error, return response
			return r, nil
		}
		// continue/retry only on DeadlineExceeded errors
		if !errors.Is(err, context.DeadlineExceeded) {
			return nil, err
		}
		lastErr = err
		// Wait on the parent context: the old code waited on the expired
		// attempt context, which fired immediately and skipped the retry
		// pause entirely.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		// static retry interval
		case <-time.After(retryInterval):
		}
	}
	// Previously this returned errors.New("") — an empty, useless message.
	return nil, errors.Wrapf(lastErr, "TestIamPermissions did not succeed after %d attempts", retries+1)
}
// contains reports whether str is an element of s.
func contains(s []string, str string) bool {
	for i := range s {
		if s[i] == str {
			return true
		}
	}
	return false
}
================================================
FILE: src/go/cmd/token-vendor/repository/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Library target for the repository interface package (Key, KeyOptions,
# ErrNotFound) shared by the k8s and in-memory backends.
go_library(
    name = "go_default_library",
    srcs = ["repository.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository",
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/cmd/token-vendor/repository/k8s/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# Library target for the Kubernetes-configmap-backed key repository.
go_library(
    name = "go_default_library",
    srcs = ["k8s.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository/k8s",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/cmd/token-vendor/repository:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/watch:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
    ],
)

# Unit tests against the fake client-go clientset.
go_test(
    name = "go_default_test",
    srcs = ["k8s_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//src/go/cmd/token-vendor/repository:go_default_library",
        "@io_k8s_client_go//kubernetes/fake:go_default_library",
    ],
)
================================================
FILE: src/go/cmd/token-vendor/repository/k8s/k8s.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8s
import (
"context"
"fmt"
"log/slog"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
"github.com/googlecloudrobotics/ilog"
)
// Re-list all ConfigMaps periodically. If this causes problems, consider
// setting to 0 instead to disable, but hopefully this provides
// some defense against bugs without being too costly.
const resyncPeriod = 1 * time.Hour

// K8sRepository uses Kubernetes configmaps as public key backend for devices.
type K8sRepository struct {
	kcl        kubernetes.Interface      // client-go Clientset
	ns         string                    // The namespace to use
	cmInformer cache.SharedIndexInformer // read-path cache of device configmaps
}
// NewK8sRepository creates a new K8sRepository key repository.
//
// `ns` names an existing namespace used for the device configmaps. For
// `kcl`, provide either a k8s.io/client-go/kubernetes/fake.NewSimpleClientset()
// for testing, or a real Interface from kubernetes.NewForConfig(..).
func NewK8sRepository(ctx context.Context, kcl kubernetes.Interface, ns string) (*K8sRepository, error) {
	cms := kcl.CoreV1().ConfigMaps(ns)
	// A shared informer keeps an in-memory cache of the configmaps and
	// prevents us from hammering the apiserver on every lookup.
	informer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
				return cms.List(ctx, opts)
			},
			WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
				return cms.Watch(ctx, opts)
			},
		},
		&corev1.ConfigMap{},
		resyncPeriod,
		cache.Indexers{},
	)
	go informer.Run(ctx.Done())
	// Block until the initial list completes so callers never observe an
	// empty cache right after startup.
	if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
		return nil, fmt.Errorf("failed to sync configmap cache")
	}
	return &K8sRepository{kcl: kcl, ns: ns, cmInformer: informer}, nil
}
const (
	// pubKey is the configmap data key that holds the device's public key.
	pubKey = "pubKey"
	// serviceAccountAnnotation optionally names the GCP service account to
	// use for the device.
	serviceAccountAnnotation = "cloudrobotics.com/gcp-service-account"
	// serviceAccountDelegateAnnotation optionally names the intermediate
	// service account delegate to use.
	serviceAccountDelegateAnnotation = "cloudrobotics.com/gcp-service-account-delegate"
)
// ListAllDeviceIDs returns the identifiers of all devices found in the
// namespace, read from the informer's in-memory configmap cache.
func (k *K8sRepository) ListAllDeviceIDs(ctx context.Context) ([]string, error) {
	names := make([]string, 0)
	for _, obj := range k.cmInformer.GetStore().List() {
		// Skip anything that is not a ConfigMap.
		if cm, ok := obj.(*corev1.ConfigMap); ok {
			names = append(names, cm.GetName())
		}
	}
	return names, nil
}
// LookupKey returns the public key for a given device identifier.
//
// The configmap is read from the informer's cache, not directly from the
// apiserver. Returns a wrapped repository.ErrNotFound if no configmap
// exists, and an error if it lacks the public key entry.
func (k *K8sRepository) LookupKey(ctx context.Context, deviceID string) (*repository.Key, error) {
	slog.Debug("looking up public key", slog.String("Namespace", k.ns), slog.String("ConfigMap", deviceID))
	storeKey := k.ns + "/" + deviceID
	obj, exists, err := k.cmInformer.GetStore().GetByKey(storeKey)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to retrieve configmap %q/%q from cache", k.ns, deviceID)
	}
	if !exists {
		return nil, errors.Wrapf(repository.ErrNotFound, "failed to retrieve configmap %q/%q", k.ns, deviceID)
	}
	cm, ok := obj.(*corev1.ConfigMap)
	if !ok {
		return nil, fmt.Errorf("unexpected object type: %T", obj)
	}
	pk, found := cm.Data[pubKey]
	if !found {
		return nil, fmt.Errorf("configmap %q/%q does not contain key %q", k.ns, deviceID, pubKey)
	}
	// Indexing a nil annotations map yields "" for both values.
	annotations := cm.ObjectMeta.Annotations
	return &repository.Key{
		pk,
		annotations[serviceAccountAnnotation],
		annotations[serviceAccountDelegateAnnotation],
	}, nil
}
// PublishKey sets or updates a public key for a given device identifier.
//
// If the configmap for a device does not exist yet it is created. If it exists
// already the public key section of the configmap is updated.
func (k *K8sRepository) PublishKey(ctx context.Context, deviceID, publicKey string) error {
	slog.Debug("publishing key", slog.String("DeviceID", deviceID))
	cm, err := createPubKeyDeviceConfig(deviceID, k.ns, publicKey)
	if err != nil {
		return errors.Wrapf(err, "failed to init device configmap %q/%q", k.ns, deviceID)
	}
	_, err = k.kcl.CoreV1().ConfigMaps(k.ns).Create(ctx, cm, metav1.CreateOptions{})
	if err == nil { // no error
		// Add to the informer store so that LookupKey can be used immediately.
		// NOTE(review): best-effort freshness only; the informer may replace
		// this entry on its next (re)sync.
		if err := k.cmInformer.GetStore().Add(cm); err != nil {
			slog.Warn("failed to add to informer store", slog.String("DeviceID", deviceID), ilog.Err(err))
		}
		return nil
	}
	if !kerrors.IsAlreadyExists(err) { // any error not AlreadyExist
		return errors.Wrapf(err, "failed to create device configmap %q/%q", k.ns, deviceID)
	}
	// AlreadyExist error, updating configmap.
	// We do not want to override any other keys besides the public key here.
	// createPubKeyDeviceConfig only creates a minimum configmap so updating is safe here.
	// NOTE(review): Update replaces the whole object; confirm that
	// annotations set via ConfigureKey survive this replacement.
	if _, err := k.kcl.CoreV1().ConfigMaps(k.ns).Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
		return errors.Wrapf(err, "configmap %q/%q exists but failed to update it", k.ns, deviceID)
	}
	// Update the informer store so that LookupKey can be used immediately.
	if err := k.cmInformer.GetStore().Update(cm); err != nil {
		slog.Warn("failed to update informer store", slog.String("DeviceID", deviceID), ilog.Err(err))
	}
	return nil
}
// ConfigureKey sets or clears the service-account annotations on a device's
// configmap.
//
// Empty option values remove the corresponding annotation. Returns a
// wrapped repository.ErrNotFound if the device configmap does not exist.
func (k *K8sRepository) ConfigureKey(ctx context.Context, deviceID string, opts repository.KeyOptions) error {
	cm, err := k.kcl.CoreV1().ConfigMaps(k.ns).Get(ctx, deviceID, metav1.GetOptions{})
	if err != nil {
		if kerrors.IsNotFound(err) {
			return errors.Wrapf(repository.ErrNotFound, "failed to retrieve configmap %q/%q", k.ns, deviceID)
		}
		// Fixed message: this reads from the apiserver, not "from cache".
		return errors.Wrapf(err, "failed to retrieve configmap %q/%q", k.ns, deviceID)
	}
	if cm.ObjectMeta.Annotations == nil {
		cm.ObjectMeta.Annotations = make(map[string]string)
	}
	mapSetOrDelete(cm.ObjectMeta.Annotations, serviceAccountAnnotation, opts.ServiceAccount)
	mapSetOrDelete(cm.ObjectMeta.Annotations, serviceAccountDelegateAnnotation, opts.ServiceAccountDelegate)
	if _, err := k.kcl.CoreV1().ConfigMaps(k.ns).Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
		return errors.Wrapf(err, "failed to update configmap %q/%q", k.ns, deviceID)
	}
	// Update the informer store so that LookupKey can be used immediately.
	if err := k.cmInformer.GetStore().Update(cm); err != nil {
		slog.Warn("failed to update informer store", slog.String("DeviceID", deviceID), ilog.Err(err))
	}
	return nil
}
// createPubKeyDeviceConfig builds a minimal configmap holding only the
// public key.
//
// It is also used when updating existing devices, so it must not set any
// default values that could override a manually configured key.
func createPubKeyDeviceConfig(name, namespace, pk string) (*corev1.ConfigMap, error) {
	cm := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
			Labels:    map[string]string{"app.kubernetes.io/managed-by": "token-vendor"},
		},
		Data: map[string]string{pubKey: pk},
	}
	return cm, nil
}
// mapSetOrDelete stores v under k, or removes k entirely when v is empty.
func mapSetOrDelete(m map[string]string, k, v string) {
	if v == "" {
		delete(m, k)
		return
	}
	m[k] = v
}
================================================
FILE: src/go/cmd/token-vendor/repository/k8s/k8s_test.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8s
import (
"context"
"errors"
"testing"
"k8s.io/client-go/kubernetes/fake"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
)
// Publish a key, retrieve it again and check listing of all keys.
func TestPublishListLookup(t *testing.T) {
ctx := context.Background()
cs := fake.NewSimpleClientset()
kcl, err := NewK8sRepository(ctx, cs, "default")
if err != nil {
t.Fatal(err)
}
const id = "testdevice"
const key = "testkey"
if err = kcl.PublishKey(ctx, id, key); err != nil {
t.Fatal(err)
}
if _, err = kcl.LookupKey(ctx, id); err != nil {
t.Fatal(err)
}
devices, err := kcl.ListAllDeviceIDs(ctx)
if err != nil {
t.Fatal(err)
}
if len(devices) != 1 || devices[0] != id {
t.Fatalf(`ListAllDeviceIDs() = %v, want [%q]`, devices, id)
}
}
// Publish a key and override it with another one.
func TestPublishKeyUpdate(t *testing.T) {
ctx := context.Background()
cs := fake.NewSimpleClientset()
kcl, err := NewK8sRepository(ctx, cs, "default")
if err != nil {
t.Fatal(err)
}
const id = "testdevice"
const key2 = "testkey2"
if err = kcl.PublishKey(ctx, id, "testkey"); err != nil {
t.Fatal(err)
}
if err = kcl.PublishKey(ctx, id, key2); err != nil {
t.Fatal(err)
}
k, err := kcl.LookupKey(ctx, id)
if err != nil {
t.Fatal(err)
}
if k.PublicKey != key2 {
t.Fatalf("LookupKey(..) = %q, want %q", k.PublicKey, key2)
}
}
func TestLookupDoesNotExist(t *testing.T) {
ctx := context.Background()
cs := fake.NewSimpleClientset()
kcl, err := NewK8sRepository(ctx, cs, "default")
if err != nil {
t.Fatal(err)
}
k, err := kcl.LookupKey(ctx, "testdevice")
if !errors.Is(err, repository.ErrNotFound) {
t.Fatalf("LookupKey produced wrong error: got %v, want %v", err, repository.ErrNotFound)
}
if k != nil {
t.Fatalf("LookupKey(..) = %q, want nil", k)
}
}
func TestConfigure(t *testing.T) {
ctx := context.Background()
cs := fake.NewSimpleClientset()
kcl, err := NewK8sRepository(ctx, cs, "default")
if err != nil {
t.Fatal(err)
}
const id = "testdevice"
const key = "testkey"
if err = kcl.PublishKey(ctx, id, key); err != nil {
t.Fatal(err)
}
opts := repository.KeyOptions{"svc@example.com", ""}
if err := kcl.ConfigureKey(ctx, id, opts); err != nil {
t.Fatal(err)
}
k, err := kcl.LookupKey(ctx, id)
if err != nil {
t.Fatal(err)
}
if k.SAName != "svc@example.com" {
t.Fatalf("LookupKey: got %q, expected %q", k.SAName, "svc@example.com")
}
}
// TestReConfigure sets a service account on a device and then clears it
// again, verifying that the annotation is removed.
func TestReConfigure(t *testing.T) {
	ctx := context.Background()
	cs := fake.NewSimpleClientset()
	kcl, err := NewK8sRepository(ctx, cs, "default")
	if err != nil {
		t.Fatal(err)
	}
	const id = "testdevice"
	const key = "testkey"
	if err = kcl.PublishKey(ctx, id, key); err != nil {
		t.Fatal(err)
	}
	opts := repository.KeyOptions{"svc@example.com", ""}
	if err := kcl.ConfigureKey(ctx, id, opts); err != nil {
		t.Fatal(err)
	}
	// remove the config again
	opts = repository.KeyOptions{"", ""}
	if err := kcl.ConfigureKey(ctx, id, opts); err != nil {
		t.Fatal(err)
	}
	k, err := kcl.LookupKey(ctx, id)
	if err != nil {
		t.Fatal(err)
	}
	if k.SAName != "" {
		// Fixed message: after clearing, the expected value is the empty
		// string, not the previously configured service account.
		t.Fatalf("LookupKey: got %q, expected %q", k.SAName, "")
	}
}
================================================
FILE: src/go/cmd/token-vendor/repository/memory/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# Library target for the in-memory key repository (integration tests only).
go_library(
    name = "go_default_library",
    srcs = ["memory.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository/memory",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/cmd/token-vendor/repository:go_default_library",
    ],
)

# Unit tests for the in-memory repository.
go_test(
    name = "go_default_test",
    srcs = ["memory_test.go"],
    embed = [":go_default_library"],
    deps = ["//src/go/cmd/token-vendor/repository:go_default_library"],
)
================================================
FILE: src/go/cmd/token-vendor/repository/memory/memory.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"context"
"log/slog"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
)
// MemoryRepository uses a in-memory datastructure to store the keys.
// Used only for integration tests.
//
// NOTE(review): access to the maps is not synchronized — not safe for
// concurrent use.
type MemoryRepository struct {
	keys map[string]string                // deviceID -> public key
	opts map[string]repository.KeyOptions // deviceID -> configured options
}
// NewMemoryRepository returns an empty in-memory key repository.
func NewMemoryRepository(ctx context.Context) (*MemoryRepository, error) {
	repo := &MemoryRepository{
		keys: make(map[string]string),
		opts: make(map[string]repository.KeyOptions),
	}
	return repo, nil
}
// PublishKey stores or replaces the public key for the given device.
func (m *MemoryRepository) PublishKey(ctx context.Context, deviceID, publicKey string) error {
	slog.Debug("PublishKey", slog.String("DeviceID", deviceID), slog.String("PublicKey", publicKey))
	m.keys[deviceID] = publicKey
	return nil
}
// LookupKey returns the stored key and options for the given device, or
// repository.ErrNotFound if the device is unknown.
func (m *MemoryRepository) LookupKey(ctx context.Context, deviceID string) (*repository.Key, error) {
	slog.Debug("LookupKey", slog.String("DeviceID", deviceID))
	pk, ok := m.keys[deviceID]
	if !ok {
		return nil, repository.ErrNotFound
	}
	// A missing options entry yields the zero value, i.e. no configuration.
	opts := m.opts[deviceID]
	return &repository.Key{pk, opts.ServiceAccount, opts.ServiceAccountDelegate}, nil
}
// ConfigureKey stores the given options for a device, replacing any
// previous configuration.
func (m *MemoryRepository) ConfigureKey(ctx context.Context, deviceID string, opts repository.KeyOptions) error {
	m.opts[deviceID] = opts
	return nil
}
================================================
FILE: src/go/cmd/token-vendor/repository/memory/memory_test.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"context"
"errors"
"testing"
"github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/repository"
)
// Test publish and lookup key
func TestPublishAndLookup(t *testing.T) {
	ctx := context.Background()
	m, err := NewMemoryRepository(ctx)
	if err != nil {
		t.Fatal(err)
	}
	for id, pk := range map[string]string{"a": "akey", "b": "bkey"} {
		if err := m.PublishKey(ctx, id, pk); err != nil {
			t.Fatal(err)
		}
	}
	k, err := m.LookupKey(ctx, "a")
	if err != nil {
		t.Fatal(err)
	}
	if k.PublicKey != "akey" {
		t.Fatalf("Key for a: got %q, want %q", k, "akey")
	}
	if k, err = m.LookupKey(ctx, "b"); err != nil {
		t.Fatal(err)
	}
	if k.PublicKey != "bkey" {
		t.Fatalf("Key for b: got %q, want %q", k, "bkey")
	}
}
// TestLookupNotFound verifies that looking up an unknown device yields
// repository.ErrNotFound and a nil key.
func TestLookupNotFound(t *testing.T) {
	ctx := context.Background()
	m, err := NewMemoryRepository(ctx)
	if err != nil {
		t.Fatal(err)
	}
	k, err := m.LookupKey(ctx, "a")
	if !errors.Is(err, repository.ErrNotFound) {
		t.Fatalf("LookupKey produced wrong error: got %v, want %v", err, repository.ErrNotFound)
	}
	if k != nil {
		// %+v instead of %q: `go vet` flags %q for a struct pointer and
		// it would not print a readable value anyway.
		t.Fatalf("LookupKey: got %+v, expected empty response", k)
	}
}
// TestConfigure verifies that options applied via ConfigureKey are visible in
// subsequent lookups.
func TestConfigure(t *testing.T) {
	ctx := context.Background()
	m, err := NewMemoryRepository(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := m.PublishKey(ctx, "a", "akey"); err != nil {
		t.Fatal(err)
	}
	// Keyed literal: robust against field reordering in KeyOptions and
	// accepted by `go vet`'s composites check.
	opts := repository.KeyOptions{ServiceAccount: "svc@example.com"}
	if err := m.ConfigureKey(ctx, "a", opts); err != nil {
		t.Fatal(err)
	}
	k, err := m.LookupKey(ctx, "a")
	if err != nil {
		t.Fatal(err)
	}
	if k.SAName != "svc@example.com" {
		t.Fatalf("LookupKey: got %q, expected %q", k.SAName, "svc@example.com")
	}
}
================================================
FILE: src/go/cmd/token-vendor/repository/repository.go
================================================
// Copyright 2024 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package repository defines the api for the pub key stores
package repository
import (
"context"
"errors"
)
var (
	// ErrNotFound indicates that the requested key is not known.
	ErrNotFound = errors.New("key not found")
)

// Key holds data + metadata of a public key entry.
type Key struct {
	// PublicKey contains the public key data.
	PublicKey string
	// SAName is the optional GCP IAM service-account that has been associated.
	SAName string
	// SADelegateName is the optional GCP IAM service-account to act as an intermediate delegate.
	SADelegateName string
}

// KeyOptions contain optional settings for a key.
type KeyOptions struct {
	// ServiceAccount is the optional IAM service account associated with the key.
	ServiceAccount string `json:"service-account"`
	// ServiceAccountDelegate is the optional intermediate delegate service account.
	ServiceAccountDelegate string `json:"service-account-delegate"`
}

// PubKeyRepository defines the api for the pub key stores.
type PubKeyRepository interface {
	// LookupKey retrieves the public key of a device from the repository.
	// It returns ErrNotFound when no key exists for the given identifier.
	// NOTE(review): implementations may also report blocked devices as not
	// found — confirm per backend.
	LookupKey(ctx context.Context, deviceID string) (*Key, error)
	// PublishKey stores the public key for the given device identifier.
	PublishKey(ctx context.Context, deviceID, publicKey string) error
	// ConfigureKey applies the given opts to the key store.
	ConfigureKey(ctx context.Context, deviceID string, opts KeyOptions) error
}
================================================
FILE: src/go/cmd/token-vendor/testdata/describe_device_a.json
================================================
{
"id": "robot-dev-device-a",
"name": "projects/testproject/locations/europe-west1/registries/cloud-robotics/devices/3072877074145970",
"numId": "3072877074145970",
"credentials": [
{
"publicKey": {
"format": "RSA_PEM",
"key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQAB\n-----END PUBLIC KEY-----"
},
"expirationTime": "1970-01-01T00:00:00Z"
}
],
"config": {
"version": "1",
"cloudUpdateTime": "2022-08-18T15:36:53.627428Z"
},
"gatewayConfig": {}
}
================================================
FILE: src/go/cmd/token-vendor/testdata/describe_device_b.json
================================================
{
"id": "robot-dev-device-b",
"name": "projects/testproject/locations/europe-west1/registries/cloud-robotics/devices/3072877074145970",
"numId": "3072877074145970",
"credentials": [
{
"publicKey": {
"format": "RSA_PEM",
"key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQAB\n-----END PUBLIC KEY-----"
},
"expirationTime": "1970-01-01T00:00:00Z"
}
],
"config": {
"version": "1",
"cloudUpdateTime": "2022-08-18T15:36:53.627428Z"
},
"gatewayConfig": {}
}
================================================
FILE: src/go/cmd/token-vendor/testdata/describe_device_b_blocked.json
================================================
{
"id": "robot-dev-device-b",
"name": "projects/testproject/locations/europe-west1/registries/cloud-robotics/devices/3072877074145970",
"numId": "3072877074145970",
"blocked": true,
"credentials": [
{
"publicKey": {
"format": "RSA_PEM",
"key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQAB\n-----END PUBLIC KEY-----"
},
"expirationTime": "1970-01-01T00:00:00Z"
}
],
"config": {
"version": "1",
"cloudUpdateTime": "2022-08-18T15:36:53.627428Z"
},
"gatewayConfig": {}
}
================================================
FILE: src/go/cmd/token-vendor/testdata/list_devices.json
================================================
{
"devices": [
{
"id": "testdevice-a",
"name": "projects/testproject/locations/testlocation/registries/testregistry/devices/1",
"numId": "1",
"gatewayConfig": {}
},
{
"id": "testdevice-b",
"name": "projects/testproject/locations/testlocation/registries/testregistry/devices/2",
"numId": "2",
"gatewayConfig": {}
}
]
}
================================================
FILE: src/go/cmd/token-vendor/tokensource/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
# Token-source library: obtains GCP access tokens via the IAM credentials API.
go_library(
    name = "go_default_library",
    srcs = ["gcp.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/cmd/token-vendor/tokensource",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_pkg_errors//:go_default_library",
        "@com_google_cloud_go_compute_metadata//:go_default_library",
        "@org_golang_google_api//iamcredentials/v1:go_default_library",
        "@org_golang_google_api//option:go_default_library",
    ],
)

# Unit tests for the token-source conversion helpers.
go_test(
    name = "go_default_test",
    srcs = ["gcp_test.go"],
    embed = [":go_default_library"],
    deps = [
        "@com_github_google_go_cmp//cmp:go_default_library",
        "@org_golang_google_api//iamcredentials/v1:go_default_library",
    ],
)
================================================
FILE: src/go/cmd/token-vendor/tokensource/gcp.go
================================================
package tokensource
import (
"context"
"fmt"
"log/slog"
"math"
"net/http"
"strings"
"sync/atomic"
"time"
"cloud.google.com/go/compute/metadata"
"github.com/pkg/errors"
iam "google.golang.org/api/iamcredentials/v1"
"google.golang.org/api/option"
)
// GCPTokenSource generates GCP access tokens for service accounts via the
// IAM credentials API.
type GCPTokenSource struct {
	// service is the IAM credentials API client.
	service *iam.Service
	// scopes are the OAuth scopes requested for every generated token.
	scopes []string
}

// TokenResponse is the JSON-serializable token reply returned to callers.
// Field names follow the OAuth 2.0 access token response convention.
type TokenResponse struct {
	AccessToken string `json:"access_token"`
	ExpiresIn   int64  `json:"expires_in"`
	Scope       string `json:"scope"`
	TokenType   string `json:"token_type"`
}

const (
	// saPrefix is the resource-name prefix for service accounts; "-" is a
	// wildcard for the project as used by the IAM credentials API.
	saPrefix = "projects/-/serviceAccounts/"
)
// NewGCPTokenSource creates a token source for GCP access tokens.
//
// The `client` parameter is optional. If you supply your own client, you have
// to make sure you set the correct authentication headers yourself. If no
// client is given, authentication information is looked up from the
// environment.
// The given scopes are requested for every token generated by this source.
func NewGCPTokenSource(ctx context.Context, client *http.Client, scopes []string) (*GCPTokenSource, error) {
	service, err := iam.NewService(ctx, option.WithHTTPClient(client))
	if err != nil {
		return nil, errors.Wrap(err, "failed to create IAM service client")
	}
	return &GCPTokenSource{service: service, scopes: scopes}, nil
}
// Token returns an access token for the configured service account and scopes.
//
// API: https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateAccessToken
func (g *GCPTokenSource) Token(ctx context.Context, saName, saDelegateName string) (*TokenResponse, error) {
	if saName == "" {
		return nil, fmt.Errorf("saName must not be empty")
	}
	// When a delegate is given, build the single-element impersonation
	// chain. For details on impersonation requirements see:
	// https://docs.cloud.google.com/iam/docs/service-account-impersonation
	delegates := []string(nil)
	if saDelegateName != "" {
		delegates = append(delegates, saPrefix+saDelegateName)
	}
	principal := saPrefix + saName
	slog.DebugContext(ctx, "Requesting token", slog.String("principal", principal), slog.Any("delegates", delegates))
	request := &iam.GenerateAccessTokenRequest{
		Scope:     g.scopes,
		Delegates: delegates,
	}
	// No 'lifetime' is set on the request, so the API default applies
	// (3600 sec = 1h). This needs to stay in sync with the
	// min(cookie-expire,cookie-refresh) duration configured on oauth2-proxy.
	resp, err := g.service.Projects.ServiceAccounts.GenerateAccessToken(principal, request).Context(ctx).Do()
	if err != nil {
		return nil, errors.Wrapf(err, "GenerateAccessToken(..) for %q failed", principal)
	}
	tok, err := tokenResponse(resp, g.scopes, time.Now())
	if err != nil {
		return nil, errors.Wrapf(err, "failed to generate token response from GCP response")
	}
	return tok, nil
}
// tokenResponse returns a TokenResponse struct given an IAM response object.
//
// `now` is injected so the ExpiresIn computation is testable.
func tokenResponse(r *iam.GenerateAccessTokenResponse, scopes []string, now time.Time) (*TokenResponse, error) {
	tr := TokenResponse{
		TokenType:   "Bearer",
		AccessToken: r.AccessToken,
		Scope:       strings.Join(scopes, " ")}
	// ExpiresIn is the remaining token lifetime in seconds.
	exp, err := time.Parse(time.RFC3339Nano, r.ExpireTime)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse expiration time %q", r.ExpireTime)
	}
	// Clamp at zero instead of taking the absolute value: math.Abs would
	// report an already-expired token as still valid for a positive
	// number of seconds.
	tr.ExpiresIn = int64(math.Max(0, exp.Sub(now).Seconds()))
	return &tr, nil
}
// workloadServiceAccount caches the metadata-server lookup result.
var workloadServiceAccount = new(atomic.Pointer[string])

// getWorkloadServiceAccount returns a service account email for the pod
// running token vendor if available. The value is cached, so subsequent calls
// save time on contacting the metadata server.
func getWorkloadServiceAccount(ctx context.Context) (string, error) {
	if cached := workloadServiceAccount.Load(); cached != nil {
		return *cached, nil
	}
	lookupCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// Fetch the email for the 'default' service account.
	// In Workload Identity, 'default' is the IAM SA mapped to the pod.
	email, err := metadata.EmailWithContext(lookupCtx, "default")
	if err != nil {
		return "", fmt.Errorf("cannot obtain service account from metadata: %w", err)
	}
	// Only write when the cache is still empty; a concurrent caller would
	// have fetched the same value from the metadata server anyway.
	workloadServiceAccount.CompareAndSwap(nil, &email)
	return email, nil
}
================================================
FILE: src/go/cmd/token-vendor/tokensource/gcp_test.go
================================================
package tokensource
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
iam "google.golang.org/api/iamcredentials/v1"
)
// TokenResponseTest is one table-driven case for TestTokenResponse.
type TokenResponseTest struct {
	// desc names the case in test output.
	desc string
	// req is the IAM response fed into tokenResponse.
	req iam.GenerateAccessTokenResponse
	// scopes are joined into the resulting Scope field.
	scopes []string
	// now is the injected reference time for the ExpiresIn computation.
	now time.Time
	// tr is the expected result.
	tr TokenResponse
	// wantErr indicates whether tokenResponse should fail.
	wantErr bool
}
// TestTokenResponse exercises the conversion from an IAM response into our
// TokenResponse representation.
func TestTokenResponse(t *testing.T) {
	now, _ := time.Parse(time.RFC3339Nano, "1986-06-30T15:01:23.045123456Z")
	var cases = []TokenResponseTest{
		{
			desc: "happy path",
			req: iam.GenerateAccessTokenResponse{
				AccessToken: "abc",
				ExpireTime:  "1986-06-30T15:02:06.045123456Z"},
			scopes:  []string{"a", "b"},
			now:     now,
			tr:      TokenResponse{AccessToken: "abc", ExpiresIn: 43, Scope: "a b", TokenType: "Bearer"},
			wantErr: false,
		},
	}
	for _, test := range cases {
		t.Run(test.desc, func(t *testing.T) {
			got, err := tokenResponse(&test.req, test.scopes, test.now)
			// Simple presence check instead of the original
			// precedence-dependent boolean expression.
			if gotErr := err != nil; gotErr != test.wantErr {
				t.Fatalf("tokenResponse(..): got error %v, want %v", err, test.wantErr)
			}
			if test.wantErr {
				// No result to compare when an error was expected;
				// the original would have diffed a nil `got`.
				return
			}
			if diff := cmp.Diff(got, &test.tr); diff != "" {
				t.Fatalf("tokenResponse(..): got %+v, wanted %+v, diff %v", got, test.tr, diff)
			}
		})
	}
}
================================================
FILE: src/go/generate.sh
================================================
#!/usr/bin/env bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script can be run just like the regular dep tool. It copies the Go
# code to a shadow repo against dep can operate as usual and copies the
# resulting Gopkg.toml and Gopkg.lock files to this directory.
# It then stages the changed dependenies in the bazel WORKSPACE for manual cleanup.
set -e
# K8S release for api, apimachinery and code-generator
K8S_RELEASE="release-1.22"
CURRENT_DIR=$(pwd)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# We create the shadow repo one dir up because the dep tool falsely tries to
# truncate the GOPATH we provide after the first /go/ dir it sees.
SHADOW_REPO="${DIR}/../.gopath/src/github.com/googlecloudrobotics/core/src/go"
export GOPATH="${DIR}/../.gopath"
export GOBIN="${GOPATH}/bin"
go install k8s.io/code-generator/cmd/{applyconfiguration-gen,defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen}@${K8S_RELEASE}
export PATH="$PATH:$GOPATH/bin"
mkdir -p ${SHADOW_REPO}
rm -rf "${DIR}/pkg/client"
rm -rf "${SHADOW_REPO}/pkg/apis"
rm -rf "${SHADOW_REPO}/pkg/client"
cp -r ${DIR}/* ${SHADOW_REPO}
function finalize {
cp -rT ${SHADOW_REPO}/pkg/client ${DIR}/pkg/client
cp -rT ${SHADOW_REPO}/pkg/apis ${DIR}/pkg/apis
cd ${CURRENT_DIR}
# Re-generate BUILD files for generated packages.
${DIR}/../gomod.sh
}
trap finalize EXIT
cd ${SHADOW_REPO}
REPO=github.com/googlecloudrobotics/core/src/go
cat > "${SHADOW_REPO}/HEADER" < 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.appsV1alpha1, err = appsv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.registryV1alpha1, err = registryv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
// Prefer NewForConfig when the error can be handled gracefully.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.appsV1alpha1 = appsv1alpha1.NewForConfigOrDie(c)
	cs.registryV1alpha1 = registryv1alpha1.NewForConfigOrDie(c)
	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
	return &cs
}

// New creates a new Clientset for the given RESTClient.
// All typed clients share the single underlying REST client c.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.appsV1alpha1 = appsv1alpha1.New(c)
	cs.registryV1alpha1 = registryv1alpha1.New(c)
	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}
================================================
FILE: src/go/pkg/client/versioned/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
package versioned
================================================
FILE: src/go/pkg/client/versioned/fake/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Fake clientset for tests; mirrors the real versioned clientset.
go_library(
    name = "go_default_library",
    srcs = [
        "clientset_generated.go",
        "doc.go",
        "register.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/fake",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned:go_default_library",
        "//src/go/pkg/client/versioned/typed/apps/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned/typed/apps/v1alpha1/fake:go_default_library",
        "//src/go/pkg/client/versioned/typed/registry/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned/typed/registry/v1alpha1/fake:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/serializer:go_default_library",
        "@io_k8s_apimachinery//pkg/util/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/watch:go_default_library",
        "@io_k8s_client_go//discovery:go_default_library",
        "@io_k8s_client_go//discovery/fake:go_default_library",
        "@io_k8s_client_go//testing:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/client/versioned/fake/clientset_generated.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned"
appsv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/apps/v1alpha1"
fakeappsv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/apps/v1alpha1/fake"
registryv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/registry/v1alpha1"
fakeregistryv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/registry/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
	// Seed the tracker with the provided objects; Add fails e.g. for types
	// not registered in the scheme.
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	cs := &Clientset{tracker: o}
	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
	// Default reactor: serve every verb on every resource from the tracker.
	cs.AddReactor("*", "*", testing.ObjectReaction(o))
	// Watches are also served from the tracker.
	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
		gvr := action.GetResource()
		ns := action.GetNamespace()
		watch, err := o.Watch(gvr, ns)
		if err != nil {
			return false, nil, err
		}
		return true, watch, nil
	})
	return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
	testing.Fake
	// discovery serves fake discovery information.
	discovery *fakediscovery.FakeDiscovery
	// tracker holds the objects this fake clientset serves.
	tracker testing.ObjectTracker
}

// Discovery returns the fake discovery client.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	return c.discovery
}

// Tracker exposes the underlying object tracker, e.g. to seed or inspect
// objects in tests.
func (c *Clientset) Tracker() testing.ObjectTracker {
	return c.tracker
}

// Compile-time interface conformance checks.
var (
	_ clientset.Interface = &Clientset{}
	_ testing.FakeClient  = &Clientset{}
)

// AppsV1alpha1 retrieves the AppsV1alpha1Client
func (c *Clientset) AppsV1alpha1() appsv1alpha1.AppsV1alpha1Interface {
	return &fakeappsv1alpha1.FakeAppsV1alpha1{Fake: &c.Fake}
}

// RegistryV1alpha1 retrieves the RegistryV1alpha1Client
func (c *Clientset) RegistryV1alpha1() registryv1alpha1.RegistryV1alpha1Interface {
	return &fakeregistryv1alpha1.FakeRegistryV1alpha1{Fake: &c.Fake}
}
================================================
FILE: src/go/pkg/client/versioned/fake/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
package fake
================================================
FILE: src/go/pkg/client/versioned/fake/register.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
appsv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
registryv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// scheme and codecs back the fake clientset's object tracker.
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)

// localSchemeBuilder lists the AddToScheme functions of every API group
// served by this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
	appsv1alpha1.AddToScheme,
	registryv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
	// Must: registration of generated types cannot legitimately fail.
	utilruntime.Must(AddToScheme(scheme))
}
================================================
FILE: src/go/pkg/client/versioned/scheme/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Scheme shared by the typed clients of the versioned clientset.
go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "register.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/serializer:go_default_library",
        "@io_k8s_apimachinery//pkg/util/runtime:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/client/versioned/scheme/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme
================================================
FILE: src/go/pkg/client/versioned/scheme/register.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
appsv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
registryv1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme, Codecs and ParameterCodec are shared by all typed clients of the
// versioned clientset.
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)

// localSchemeBuilder lists the AddToScheme functions of every API group
// served by this clientset.
var localSchemeBuilder = runtime.SchemeBuilder{
	appsv1alpha1.AddToScheme,
	registryv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	// Must: registration of generated types cannot legitimately fail.
	utilruntime.Must(AddToScheme(Scheme))
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Generated typed client for the apps/v1alpha1 API group.
go_library(
    name = "go_default_library",
    srcs = [
        "app.go",
        "approllout.go",
        "apps_client.go",
        "chartassignment.go",
        "doc.go",
        "generated_expansion.go",
        "resourceset.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/apps/v1alpha1",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned/scheme:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_apimachinery//pkg/watch:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/app.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
scheme "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// AppsGetter has a method to return a AppInterface.
// A group's client should implement this interface.
type AppsGetter interface {
	Apps() AppInterface
}

// AppInterface has methods to work with App resources.
// All requests address the cluster-scoped "apps" resource (objects are
// identified by name only; no namespace parameter is taken).
type AppInterface interface {
	Create(ctx context.Context, app *v1alpha1.App, opts v1.CreateOptions) (*v1alpha1.App, error)
	Update(ctx context.Context, app *v1alpha1.App, opts v1.UpdateOptions) (*v1alpha1.App, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.App, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AppList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.App, err error)
	AppExpansion
}

// apps implements AppInterface by issuing REST requests through client.
type apps struct {
	client rest.Interface
}
// newApps constructs an apps client backed by the group client's REST client.
func newApps(c *AppsV1alpha1Client) *apps {
	return &apps{client: c.RESTClient()}
}
// Get retrieves the App with the given name, honoring any GetOptions.
func (c *apps) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.App, err error) {
	app := &v1alpha1.App{}
	req := c.client.Get().Resource("apps").Name(name)
	err = req.VersionedParams(&options, scheme.ParameterCodec).Do(ctx).Into(app)
	return app, err
}

// List returns the Apps that match the label/field selectors in opts.
func (c *apps) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppList, err error) {
	// A zero timeout means "no client-side timeout"; only set one if requested.
	timeout := time.Duration(0)
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	list := &v1alpha1.AppList{}
	req := c.client.Get().Resource("apps").VersionedParams(&opts, scheme.ParameterCodec).Timeout(timeout)
	return list, req.Do(ctx).Into(list)
}

// Watch opens a watch stream for Apps matching the selectors in opts.
func (c *apps) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	timeout := time.Duration(0)
	if t := opts.TimeoutSeconds; t != nil {
		timeout = time.Duration(*t) * time.Second
	}
	opts.Watch = true
	req := c.client.Get().Resource("apps").VersionedParams(&opts, scheme.ParameterCodec).Timeout(timeout)
	return req.Watch(ctx)
}
// Create submits a new App and returns the server's stored representation.
func (c *apps) Create(ctx context.Context, app *v1alpha1.App, opts v1.CreateOptions) (result *v1alpha1.App, err error) {
	created := &v1alpha1.App{}
	req := c.client.Post().Resource("apps").VersionedParams(&opts, scheme.ParameterCodec).Body(app)
	return created, req.Do(ctx).Into(created)
}

// Update replaces the App named app.Name and returns the server's stored representation.
func (c *apps) Update(ctx context.Context, app *v1alpha1.App, opts v1.UpdateOptions) (result *v1alpha1.App, err error) {
	updated := &v1alpha1.App{}
	req := c.client.Put().Resource("apps").Name(app.Name).VersionedParams(&opts, scheme.ParameterCodec).Body(app)
	return updated, req.Do(ctx).Into(updated)
}

// Delete removes the App with the given name. Returns an error if one occurs.
func (c *apps) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	req := c.client.Delete().Resource("apps").Name(name).Body(&opts)
	return req.Do(ctx).Error()
}
// DeleteCollection deletes every App matching listOpts in a single request.
func (c *apps) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	timeout := time.Duration(0)
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	req := c.client.Delete().Resource("apps").VersionedParams(&listOpts, scheme.ParameterCodec).Timeout(timeout).Body(&opts)
	return req.Do(ctx).Error()
}

// Patch applies data (encoded per pt) to the named App, optionally addressing
// a subresource, and returns the patched object.
func (c *apps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.App, err error) {
	patched := &v1alpha1.App{}
	req := c.client.Patch(pt).Resource("apps").Name(name).SubResource(subresources...).VersionedParams(&opts, scheme.ParameterCodec).Body(data)
	return patched, req.Do(ctx).Into(patched)
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/approllout.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
scheme "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// AppRolloutsGetter has a method to return a AppRolloutInterface.
// A group's client should implement this interface.
type AppRolloutsGetter interface {
	AppRollouts() AppRolloutInterface
}

// AppRolloutInterface has methods to work with AppRollout resources.
// AppRollout has a status subresource, so UpdateStatus is generated in
// addition to the standard CRUD verbs.
type AppRolloutInterface interface {
	Create(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.CreateOptions) (*v1alpha1.AppRollout, error)
	Update(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.UpdateOptions) (*v1alpha1.AppRollout, error)
	UpdateStatus(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.UpdateOptions) (*v1alpha1.AppRollout, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AppRollout, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AppRolloutList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppRollout, err error)
	AppRolloutExpansion
}

// appRollouts implements AppRolloutInterface by issuing REST requests through client.
type appRollouts struct {
	client rest.Interface
}
// newAppRollouts constructs an appRollouts client backed by the group client's REST client.
func newAppRollouts(c *AppsV1alpha1Client) *appRollouts {
	return &appRollouts{client: c.RESTClient()}
}
// Get retrieves the AppRollout with the given name, honoring any GetOptions.
func (c *appRollouts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AppRollout, err error) {
	rollout := &v1alpha1.AppRollout{}
	req := c.client.Get().Resource("approllouts").Name(name)
	err = req.VersionedParams(&options, scheme.ParameterCodec).Do(ctx).Into(rollout)
	return rollout, err
}

// List returns the AppRollouts that match the label/field selectors in opts.
func (c *appRollouts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppRolloutList, err error) {
	timeout := time.Duration(0)
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	list := &v1alpha1.AppRolloutList{}
	req := c.client.Get().Resource("approllouts").VersionedParams(&opts, scheme.ParameterCodec).Timeout(timeout)
	return list, req.Do(ctx).Into(list)
}

// Watch opens a watch stream for AppRollouts matching the selectors in opts.
func (c *appRollouts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	timeout := time.Duration(0)
	if t := opts.TimeoutSeconds; t != nil {
		timeout = time.Duration(*t) * time.Second
	}
	opts.Watch = true
	req := c.client.Get().Resource("approllouts").VersionedParams(&opts, scheme.ParameterCodec).Timeout(timeout)
	return req.Watch(ctx)
}
// Create submits a new AppRollout and returns the server's stored representation.
func (c *appRollouts) Create(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.CreateOptions) (result *v1alpha1.AppRollout, err error) {
	created := &v1alpha1.AppRollout{}
	req := c.client.Post().Resource("approllouts").VersionedParams(&opts, scheme.ParameterCodec).Body(appRollout)
	return created, req.Do(ctx).Into(created)
}

// Update replaces the AppRollout named appRollout.Name and returns the
// server's stored representation.
func (c *appRollouts) Update(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.UpdateOptions) (result *v1alpha1.AppRollout, err error) {
	updated := &v1alpha1.AppRollout{}
	req := c.client.Put().Resource("approllouts").Name(appRollout.Name).VersionedParams(&opts, scheme.ParameterCodec).Body(appRollout)
	return updated, req.Do(ctx).Into(updated)
}

// UpdateStatus writes only the status subresource of the AppRollout.
// It was generated because the type contains a Status member; add a
// +genclient:noStatus comment above the type to avoid generating it.
func (c *appRollouts) UpdateStatus(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.UpdateOptions) (result *v1alpha1.AppRollout, err error) {
	updated := &v1alpha1.AppRollout{}
	req := c.client.Put().Resource("approllouts").Name(appRollout.Name).SubResource("status").VersionedParams(&opts, scheme.ParameterCodec).Body(appRollout)
	return updated, req.Do(ctx).Into(updated)
}
// Delete removes the AppRollout with the given name. Returns an error if one occurs.
func (c *appRollouts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	req := c.client.Delete().Resource("approllouts").Name(name).Body(&opts)
	return req.Do(ctx).Error()
}

// DeleteCollection deletes every AppRollout matching listOpts in a single request.
func (c *appRollouts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	timeout := time.Duration(0)
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	req := c.client.Delete().Resource("approllouts").VersionedParams(&listOpts, scheme.ParameterCodec).Timeout(timeout).Body(&opts)
	return req.Do(ctx).Error()
}

// Patch applies data (encoded per pt) to the named AppRollout, optionally
// addressing a subresource, and returns the patched object.
func (c *appRollouts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppRollout, err error) {
	patched := &v1alpha1.AppRollout{}
	req := c.client.Patch(pt).Resource("approllouts").Name(name).SubResource(subresources...).VersionedParams(&opts, scheme.ParameterCodec).Body(data)
	return patched, req.Do(ctx).Into(patched)
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/apps_client.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// AppsV1alpha1Interface aggregates the typed clients for all resources in the
// apps.cloudrobotics.com/v1alpha1 API group.
type AppsV1alpha1Interface interface {
	RESTClient() rest.Interface
	AppsGetter
	AppRolloutsGetter
	ChartAssignmentsGetter
	ResourceSetsGetter
}

// AppsV1alpha1Client is used to interact with features provided by the apps.cloudrobotics.com group.
type AppsV1alpha1Client struct {
	restClient rest.Interface
}
// Apps returns a client for working with App resources.
func (c *AppsV1alpha1Client) Apps() AppInterface {
	return newApps(c)
}

// AppRollouts returns a client for working with AppRollout resources.
func (c *AppsV1alpha1Client) AppRollouts() AppRolloutInterface {
	return newAppRollouts(c)
}

// ChartAssignments returns a client for working with ChartAssignment resources.
func (c *AppsV1alpha1Client) ChartAssignments() ChartAssignmentInterface {
	return newChartAssignments(c)
}

// ResourceSets returns a client for working with ResourceSet resources.
func (c *AppsV1alpha1Client) ResourceSets() ResourceSetInterface {
	return newResourceSets(c)
}
// NewForConfig creates a new AppsV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*AppsV1alpha1Client, error) {
	// Work on a copy so defaulting never mutates the caller's config.
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	rc, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &AppsV1alpha1Client{restClient: rc}, nil
}

// NewForConfigOrDie creates a new AppsV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *AppsV1alpha1Client {
	cl, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return cl
}

// New creates a new AppsV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *AppsV1alpha1Client {
	return &AppsV1alpha1Client{restClient: c}
}
// setConfigDefaults fills in the REST config fields needed to talk to the
// apps.cloudrobotics.com/v1alpha1 API group (path, group-version, codec, UA).
func setConfigDefaults(config *rest.Config) error {
	config.APIPath = "/apis"
	gv := v1alpha1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
	if len(config.UserAgent) == 0 {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation. It is nil-safe:
// calling it on a nil receiver returns a nil interface.
func (c *AppsV1alpha1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/chartassignment.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
scheme "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ChartAssignmentsGetter has a method to return a ChartAssignmentInterface.
// A group's client should implement this interface.
type ChartAssignmentsGetter interface {
	ChartAssignments() ChartAssignmentInterface
}

// ChartAssignmentInterface has methods to work with ChartAssignment resources.
// ChartAssignment has a status subresource, so UpdateStatus is generated in
// addition to the standard CRUD verbs.
type ChartAssignmentInterface interface {
	Create(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.CreateOptions) (*v1alpha1.ChartAssignment, error)
	Update(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.UpdateOptions) (*v1alpha1.ChartAssignment, error)
	UpdateStatus(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.UpdateOptions) (*v1alpha1.ChartAssignment, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ChartAssignment, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ChartAssignmentList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ChartAssignment, err error)
	ChartAssignmentExpansion
}

// chartAssignments implements ChartAssignmentInterface by issuing REST requests through client.
type chartAssignments struct {
	client rest.Interface
}
// newChartAssignments constructs a chartAssignments client backed by the group client's REST client.
func newChartAssignments(c *AppsV1alpha1Client) *chartAssignments {
	return &chartAssignments{client: c.RESTClient()}
}
// Get retrieves the ChartAssignment with the given name, honoring any GetOptions.
func (c *chartAssignments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ChartAssignment, err error) {
	ca := &v1alpha1.ChartAssignment{}
	req := c.client.Get().Resource("chartassignments").Name(name)
	err = req.VersionedParams(&options, scheme.ParameterCodec).Do(ctx).Into(ca)
	return ca, err
}

// List returns the ChartAssignments that match the label/field selectors in opts.
func (c *chartAssignments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ChartAssignmentList, err error) {
	timeout := time.Duration(0)
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	list := &v1alpha1.ChartAssignmentList{}
	req := c.client.Get().Resource("chartassignments").VersionedParams(&opts, scheme.ParameterCodec).Timeout(timeout)
	return list, req.Do(ctx).Into(list)
}

// Watch opens a watch stream for ChartAssignments matching the selectors in opts.
func (c *chartAssignments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	timeout := time.Duration(0)
	if t := opts.TimeoutSeconds; t != nil {
		timeout = time.Duration(*t) * time.Second
	}
	opts.Watch = true
	req := c.client.Get().Resource("chartassignments").VersionedParams(&opts, scheme.ParameterCodec).Timeout(timeout)
	return req.Watch(ctx)
}
// Create submits a new ChartAssignment and returns the server's stored representation.
func (c *chartAssignments) Create(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.CreateOptions) (result *v1alpha1.ChartAssignment, err error) {
	created := &v1alpha1.ChartAssignment{}
	req := c.client.Post().Resource("chartassignments").VersionedParams(&opts, scheme.ParameterCodec).Body(chartAssignment)
	return created, req.Do(ctx).Into(created)
}

// Update replaces the ChartAssignment named chartAssignment.Name and returns
// the server's stored representation.
func (c *chartAssignments) Update(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.UpdateOptions) (result *v1alpha1.ChartAssignment, err error) {
	updated := &v1alpha1.ChartAssignment{}
	req := c.client.Put().Resource("chartassignments").Name(chartAssignment.Name).VersionedParams(&opts, scheme.ParameterCodec).Body(chartAssignment)
	return updated, req.Do(ctx).Into(updated)
}

// UpdateStatus writes only the status subresource of the ChartAssignment.
// It was generated because the type contains a Status member; add a
// +genclient:noStatus comment above the type to avoid generating it.
func (c *chartAssignments) UpdateStatus(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.UpdateOptions) (result *v1alpha1.ChartAssignment, err error) {
	updated := &v1alpha1.ChartAssignment{}
	req := c.client.Put().Resource("chartassignments").Name(chartAssignment.Name).SubResource("status").VersionedParams(&opts, scheme.ParameterCodec).Body(chartAssignment)
	return updated, req.Do(ctx).Into(updated)
}
// Delete removes the ChartAssignment with the given name. Returns an error if one occurs.
func (c *chartAssignments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	req := c.client.Delete().Resource("chartassignments").Name(name).Body(&opts)
	return req.Do(ctx).Error()
}

// DeleteCollection deletes every ChartAssignment matching listOpts in a single request.
func (c *chartAssignments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	timeout := time.Duration(0)
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	req := c.client.Delete().Resource("chartassignments").VersionedParams(&listOpts, scheme.ParameterCodec).Timeout(timeout).Body(&opts)
	return req.Do(ctx).Error()
}

// Patch applies data (encoded per pt) to the named ChartAssignment, optionally
// addressing a subresource, and returns the patched object.
func (c *chartAssignments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ChartAssignment, err error) {
	patched := &v1alpha1.ChartAssignment{}
	req := c.client.Patch(pt).Resource("chartassignments").Name(name).SubResource(subresources...).VersionedParams(&opts, scheme.ParameterCodec).Body(data)
	return patched, req.Do(ctx).Into(patched)
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Generated fake (in-memory) typed clients for the apps.cloudrobotics.com
# v1alpha1 API group; used in unit tests instead of a real API server.
go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "fake_app.go",
        "fake_approllout.go",
        "fake_apps_client.go",
        "fake_chartassignment.go",
        "fake_resourceset.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/apps/v1alpha1/fake",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned/typed/apps/v1alpha1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/labels:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_apimachinery//pkg/watch:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//testing:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/fake_app.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeApps implements AppInterface by recording actions on the embedded
// fake clientset instead of contacting a real API server.
type FakeApps struct {
	Fake *FakeAppsV1alpha1
}

// Resource/kind identifiers used to register fake actions for App objects.
var appsResource = schema.GroupVersionResource{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Resource: "apps"}

var appsKind = schema.GroupVersionKind{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Kind: "App"}
// Get retrieves the App with the given name from the fake object tracker.
func (c *FakeApps) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.App, err error) {
	obj, err := c.Fake.Invokes(testing.NewRootGetAction(appsResource, name), &v1alpha1.App{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.App), err
}

// List returns the tracked Apps, filtered by the label selector in opts.
func (c *FakeApps) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppList, err error) {
	obj, err := c.Fake.Invokes(testing.NewRootListAction(appsResource, appsKind, opts), &v1alpha1.AppList{})
	if obj == nil {
		return nil, err
	}
	// Field selectors are ignored by the fake; only labels are filtered.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	src := obj.(*v1alpha1.AppList)
	filtered := &v1alpha1.AppList{ListMeta: src.ListMeta}
	for _, item := range src.Items {
		if label.Matches(labels.Set(item.Labels)) {
			filtered.Items = append(filtered.Items, item)
		}
	}
	return filtered, err
}

// Watch returns a fake watch.Interface for the requested apps.
func (c *FakeApps) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.InvokesWatch(testing.NewRootWatchAction(appsResource, opts))
}
// Create records a create action for the App and returns the tracker's copy.
func (c *FakeApps) Create(ctx context.Context, app *v1alpha1.App, opts v1.CreateOptions) (result *v1alpha1.App, err error) {
	obj, err := c.Fake.Invokes(testing.NewRootCreateAction(appsResource, app), &v1alpha1.App{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.App), err
}

// Update records an update action for the App and returns the tracker's copy.
func (c *FakeApps) Update(ctx context.Context, app *v1alpha1.App, opts v1.UpdateOptions) (result *v1alpha1.App, err error) {
	obj, err := c.Fake.Invokes(testing.NewRootUpdateAction(appsResource, app), &v1alpha1.App{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.App), err
}

// Delete records a delete action for the named App.
func (c *FakeApps) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	_, err := c.Fake.Invokes(testing.NewRootDeleteAction(appsResource, name), &v1alpha1.App{})
	return err
}
// DeleteCollection records a delete-collection action for Apps matching listOpts.
func (c *FakeApps) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	_, err := c.Fake.Invokes(testing.NewRootDeleteCollectionAction(appsResource, listOpts), &v1alpha1.AppList{})
	return err
}

// Patch records a patch action for the named App and returns the tracker's copy.
func (c *FakeApps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.App, err error) {
	action := testing.NewRootPatchSubresourceAction(appsResource, name, pt, data, subresources...)
	obj, err := c.Fake.Invokes(action, &v1alpha1.App{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.App), err
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/fake_approllout.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeAppRollouts implements AppRolloutInterface by recording actions on the
// embedded fake clientset instead of contacting a real API server.
type FakeAppRollouts struct {
	Fake *FakeAppsV1alpha1
}

// Resource/kind identifiers used to register fake actions for AppRollout objects.
var approlloutsResource = schema.GroupVersionResource{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Resource: "approllouts"}

var approlloutsKind = schema.GroupVersionKind{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Kind: "AppRollout"}
// Get retrieves the AppRollout with the given name from the fake object tracker.
func (c *FakeAppRollouts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AppRollout, err error) {
	obj, err := c.Fake.Invokes(testing.NewRootGetAction(approlloutsResource, name), &v1alpha1.AppRollout{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.AppRollout), err
}

// List returns the tracked AppRollouts, filtered by the label selector in opts.
func (c *FakeAppRollouts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppRolloutList, err error) {
	obj, err := c.Fake.Invokes(testing.NewRootListAction(approlloutsResource, approlloutsKind, opts), &v1alpha1.AppRolloutList{})
	if obj == nil {
		return nil, err
	}
	// Field selectors are ignored by the fake; only labels are filtered.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	src := obj.(*v1alpha1.AppRolloutList)
	filtered := &v1alpha1.AppRolloutList{ListMeta: src.ListMeta}
	for _, item := range src.Items {
		if label.Matches(labels.Set(item.Labels)) {
			filtered.Items = append(filtered.Items, item)
		}
	}
	return filtered, err
}

// Watch returns a fake watch.Interface for the requested appRollouts.
func (c *FakeAppRollouts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.InvokesWatch(testing.NewRootWatchAction(approlloutsResource, opts))
}
// Create takes the representation of a appRollout and creates it. Returns the server's representation of the appRollout, and an error, if there is any.
func (c *FakeAppRollouts) Create(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.CreateOptions) (result *v1alpha1.AppRollout, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootCreateAction(approlloutsResource, appRollout), &v1alpha1.AppRollout{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.AppRollout), err
}
// Update takes the representation of a appRollout and updates it. Returns the server's representation of the appRollout, and an error, if there is any.
func (c *FakeAppRollouts) Update(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.UpdateOptions) (result *v1alpha1.AppRollout, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateAction(approlloutsResource, appRollout), &v1alpha1.AppRollout{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.AppRollout), err
}
// UpdateStatus records an update action against the "status" subresource.
// Generated because AppRollout has a Status member; a +genclient:noStatus
// comment on the type would suppress it.
func (c *FakeAppRollouts) UpdateStatus(ctx context.Context, appRollout *v1alpha1.AppRollout, opts v1.UpdateOptions) (*v1alpha1.AppRollout, error) {
	action := testing.NewRootUpdateSubresourceAction(approlloutsResource, "status", appRollout)
	ret, err := c.Fake.Invokes(action, &v1alpha1.AppRollout{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.AppRollout), err
}
// Delete records a root-level delete action for the named appRollout.
func (c *FakeAppRollouts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	action := testing.NewRootDeleteAction(approlloutsResource, name)
	_, err := c.Fake.Invokes(action, &v1alpha1.AppRollout{})
	return err
}
// DeleteCollection records a root-level delete-collection action for appRollouts.
func (c *FakeAppRollouts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	_, err := c.Fake.Invokes(testing.NewRootDeleteCollectionAction(approlloutsResource, listOpts), &v1alpha1.AppRolloutList{})
	return err
}
// Patch records a root-level patch action (optionally against a subresource)
// and returns the patched appRollout.
func (c *FakeAppRollouts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppRollout, err error) {
	action := testing.NewRootPatchSubresourceAction(approlloutsResource, name, pt, data, subresources...)
	ret, err := c.Fake.Invokes(action, &v1alpha1.AppRollout{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.AppRollout), err
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/fake_apps_client.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/apps/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeAppsV1alpha1 is a fake implementation of the apps/v1alpha1 client group,
// dispatching every call to the embedded testing.Fake instead of an API server.
type FakeAppsV1alpha1 struct {
	*testing.Fake
}
// Apps returns a fake client for App resources.
func (c *FakeAppsV1alpha1) Apps() v1alpha1.AppInterface {
	apps := &FakeApps{c}
	return apps
}
// AppRollouts returns a fake client for AppRollout resources.
func (c *FakeAppsV1alpha1) AppRollouts() v1alpha1.AppRolloutInterface {
	rollouts := &FakeAppRollouts{c}
	return rollouts
}
// ChartAssignments returns a fake client for ChartAssignment resources.
func (c *FakeAppsV1alpha1) ChartAssignments() v1alpha1.ChartAssignmentInterface {
	assignments := &FakeChartAssignments{Fake: c}
	return assignments
}
// ResourceSets returns a fake client for ResourceSet resources.
func (c *FakeAppsV1alpha1) ResourceSets() v1alpha1.ResourceSetInterface {
	sets := &FakeResourceSets{Fake: c}
	return sets
}
// RESTClient returns a typed nil *rest.RESTClient to satisfy the interface;
// the fake client routes all calls through testing.Fake rather than REST.
func (c *FakeAppsV1alpha1) RESTClient() rest.Interface {
	return (*rest.RESTClient)(nil)
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/fake_chartassignment.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeChartAssignments implements ChartAssignmentInterface
type FakeChartAssignments struct {
	// Fake dispatches recorded actions to the shared object tracker.
	Fake *FakeAppsV1alpha1
}

// chartassignmentsResource is the GroupVersionResource stamped on every action this fake records.
var chartassignmentsResource = schema.GroupVersionResource{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Resource: "chartassignments"}

// chartassignmentsKind is the GroupVersionKind used when constructing list actions.
var chartassignmentsKind = schema.GroupVersionKind{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Kind: "ChartAssignment"}
// Get records a root-level get action for the named chartAssignment and
// returns the object produced by the fake's reaction chain.
func (c *FakeChartAssignments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ChartAssignment, err error) {
	action := testing.NewRootGetAction(chartassignmentsResource, name)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ChartAssignment{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ChartAssignment), err
}
// List records a root-level list action and returns the tracker's
// ChartAssignments filtered by the label selector in opts. Only the label
// selector is applied; the field selector is discarded.
func (c *FakeChartAssignments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ChartAssignmentList, err error) {
	ret, err := c.Fake.Invokes(testing.NewRootListAction(chartassignmentsResource, chartassignmentsKind, opts), &v1alpha1.ChartAssignmentList{})
	if ret == nil {
		return nil, err
	}
	selector, _, _ := testing.ExtractFromListOptions(opts)
	if selector == nil {
		selector = labels.Everything()
	}
	src := ret.(*v1alpha1.ChartAssignmentList)
	filtered := &v1alpha1.ChartAssignmentList{ListMeta: src.ListMeta}
	for _, item := range src.Items {
		if selector.Matches(labels.Set(item.Labels)) {
			filtered.Items = append(filtered.Items, item)
		}
	}
	return filtered, err
}
// Watch returns a watch.Interface that watches the requested chartAssignments.
func (c *FakeChartAssignments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	action := testing.NewRootWatchAction(chartassignmentsResource, opts)
	return c.Fake.InvokesWatch(action)
}
// Create records a root-level create action for the given chartAssignment and
// returns the object produced by the fake's reaction chain.
func (c *FakeChartAssignments) Create(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.CreateOptions) (result *v1alpha1.ChartAssignment, err error) {
	action := testing.NewRootCreateAction(chartassignmentsResource, chartAssignment)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ChartAssignment{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ChartAssignment), err
}
// Update records a root-level update action for the given chartAssignment and
// returns the object produced by the fake's reaction chain.
func (c *FakeChartAssignments) Update(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.UpdateOptions) (result *v1alpha1.ChartAssignment, err error) {
	action := testing.NewRootUpdateAction(chartassignmentsResource, chartAssignment)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ChartAssignment{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ChartAssignment), err
}
// UpdateStatus records an update action against the "status" subresource.
// Generated because ChartAssignment has a Status member; a +genclient:noStatus
// comment on the type would suppress it.
func (c *FakeChartAssignments) UpdateStatus(ctx context.Context, chartAssignment *v1alpha1.ChartAssignment, opts v1.UpdateOptions) (*v1alpha1.ChartAssignment, error) {
	action := testing.NewRootUpdateSubresourceAction(chartassignmentsResource, "status", chartAssignment)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ChartAssignment{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ChartAssignment), err
}
// Delete records a root-level delete action for the named chartAssignment.
func (c *FakeChartAssignments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	action := testing.NewRootDeleteAction(chartassignmentsResource, name)
	_, err := c.Fake.Invokes(action, &v1alpha1.ChartAssignment{})
	return err
}
// DeleteCollection records a root-level delete-collection action for chartAssignments.
func (c *FakeChartAssignments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	_, err := c.Fake.Invokes(testing.NewRootDeleteCollectionAction(chartassignmentsResource, listOpts), &v1alpha1.ChartAssignmentList{})
	return err
}
// Patch records a root-level patch action (optionally against a subresource)
// and returns the patched chartAssignment.
func (c *FakeChartAssignments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ChartAssignment, err error) {
	action := testing.NewRootPatchSubresourceAction(chartassignmentsResource, name, pt, data, subresources...)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ChartAssignment{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ChartAssignment), err
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/fake/fake_resourceset.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeResourceSets implements ResourceSetInterface
type FakeResourceSets struct {
	// Fake dispatches recorded actions to the shared object tracker.
	Fake *FakeAppsV1alpha1
}

// resourcesetsResource is the GroupVersionResource stamped on every action this fake records.
var resourcesetsResource = schema.GroupVersionResource{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Resource: "resourcesets"}

// resourcesetsKind is the GroupVersionKind used when constructing list actions.
var resourcesetsKind = schema.GroupVersionKind{Group: "apps.cloudrobotics.com", Version: "v1alpha1", Kind: "ResourceSet"}
// Get records a root-level get action for the named resourceSet and returns
// the object produced by the fake's reaction chain.
func (c *FakeResourceSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceSet, err error) {
	action := testing.NewRootGetAction(resourcesetsResource, name)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ResourceSet{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ResourceSet), err
}
// List records a root-level list action and returns the tracker's
// ResourceSets filtered by the label selector in opts. Only the label
// selector is applied; the field selector is discarded.
func (c *FakeResourceSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceSetList, err error) {
	ret, err := c.Fake.Invokes(testing.NewRootListAction(resourcesetsResource, resourcesetsKind, opts), &v1alpha1.ResourceSetList{})
	if ret == nil {
		return nil, err
	}
	selector, _, _ := testing.ExtractFromListOptions(opts)
	if selector == nil {
		selector = labels.Everything()
	}
	src := ret.(*v1alpha1.ResourceSetList)
	filtered := &v1alpha1.ResourceSetList{ListMeta: src.ListMeta}
	for _, item := range src.Items {
		if selector.Matches(labels.Set(item.Labels)) {
			filtered.Items = append(filtered.Items, item)
		}
	}
	return filtered, err
}
// Watch returns a watch.Interface that watches the requested resourceSets.
func (c *FakeResourceSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	action := testing.NewRootWatchAction(resourcesetsResource, opts)
	return c.Fake.InvokesWatch(action)
}
// Create records a root-level create action for the given resourceSet and
// returns the object produced by the fake's reaction chain.
func (c *FakeResourceSets) Create(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.CreateOptions) (result *v1alpha1.ResourceSet, err error) {
	action := testing.NewRootCreateAction(resourcesetsResource, resourceSet)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ResourceSet{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ResourceSet), err
}
// Update records a root-level update action for the given resourceSet and
// returns the object produced by the fake's reaction chain.
func (c *FakeResourceSets) Update(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.UpdateOptions) (result *v1alpha1.ResourceSet, err error) {
	action := testing.NewRootUpdateAction(resourcesetsResource, resourceSet)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ResourceSet{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ResourceSet), err
}
// UpdateStatus records an update action against the "status" subresource.
// Generated because ResourceSet has a Status member; a +genclient:noStatus
// comment on the type would suppress it.
func (c *FakeResourceSets) UpdateStatus(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.UpdateOptions) (*v1alpha1.ResourceSet, error) {
	action := testing.NewRootUpdateSubresourceAction(resourcesetsResource, "status", resourceSet)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ResourceSet{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ResourceSet), err
}
// Delete records a root-level delete action for the named resourceSet.
func (c *FakeResourceSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	action := testing.NewRootDeleteAction(resourcesetsResource, name)
	_, err := c.Fake.Invokes(action, &v1alpha1.ResourceSet{})
	return err
}
// DeleteCollection records a root-level delete-collection action for resourceSets.
func (c *FakeResourceSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	_, err := c.Fake.Invokes(testing.NewRootDeleteCollectionAction(resourcesetsResource, listOpts), &v1alpha1.ResourceSetList{})
	return err
}
// Patch records a root-level patch action (optionally against a subresource)
// and returns the patched resourceSet.
func (c *FakeResourceSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceSet, err error) {
	action := testing.NewRootPatchSubresourceAction(resourcesetsResource, name, pt, data, subresources...)
	ret, err := c.Fake.Invokes(action, &v1alpha1.ResourceSet{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.ResourceSet), err
}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/generated_expansion.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
// AppExpansion is an empty interface reserved for hand-written extension
// methods on the App client (standard client-gen convention).
type AppExpansion interface{}

// AppRolloutExpansion is an empty interface reserved for hand-written
// extension methods on the AppRollout client.
type AppRolloutExpansion interface{}

// ChartAssignmentExpansion is an empty interface reserved for hand-written
// extension methods on the ChartAssignment client.
type ChartAssignmentExpansion interface{}

// ResourceSetExpansion is an empty interface reserved for hand-written
// extension methods on the ResourceSet client.
type ResourceSetExpansion interface{}
================================================
FILE: src/go/pkg/client/versioned/typed/apps/v1alpha1/resourceset.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
scheme "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ResourceSetsGetter has a method to return a ResourceSetInterface.
// A group's client should implement this interface.
type ResourceSetsGetter interface {
	// ResourceSets returns a client for working with ResourceSet resources.
	ResourceSets() ResourceSetInterface
}
// ResourceSetInterface has methods to work with ResourceSet resources.
type ResourceSetInterface interface {
	// Create submits a new ResourceSet and returns the server's copy.
	Create(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.CreateOptions) (*v1alpha1.ResourceSet, error)
	// Update submits changes to an existing ResourceSet and returns the server's copy.
	Update(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.UpdateOptions) (*v1alpha1.ResourceSet, error)
	// UpdateStatus updates only the status subresource of a ResourceSet.
	UpdateStatus(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.UpdateOptions) (*v1alpha1.ResourceSet, error)
	// Delete removes the named ResourceSet.
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	// DeleteCollection removes the ResourceSets selected by listOpts.
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	// Get fetches the named ResourceSet.
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceSet, error)
	// List returns the ResourceSets matching the selectors in opts.
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceSetList, error)
	// Watch streams change events for ResourceSets matching opts.
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	// Patch applies data (optionally to a subresource) and returns the patched object.
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceSet, err error)
	// ResourceSetExpansion carries hand-written extension methods (currently empty).
	ResourceSetExpansion
}
// resourceSets implements ResourceSetInterface
type resourceSets struct {
	// client issues the underlying REST requests for this resource.
	client rest.Interface
}
// newResourceSets returns a resourceSets client bound to the group's REST client.
func newResourceSets(c *AppsV1alpha1Client) *resourceSets {
	return &resourceSets{client: c.RESTClient()}
}
// Get issues a GET for the named resourceSet and decodes the response into result.
func (c *resourceSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceSet, err error) {
	result = &v1alpha1.ResourceSet{}
	req := c.client.Get().
		Resource("resourcesets").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec)
	err = req.Do(ctx).Into(result)
	return result, err
}
// List issues a GET over the resourcesets collection, applying the selectors
// in opts and honoring opts.TimeoutSeconds as a client-side request timeout.
func (c *resourceSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceSetList, err error) {
	timeout := time.Duration(0)
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	result = &v1alpha1.ResourceSetList{}
	req := c.client.Get().
		Resource("resourcesets").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout)
	err = req.Do(ctx).Into(result)
	return result, err
}
// Watch issues a GET with opts.Watch forced true and returns the resulting
// event stream for resourceSets.
func (c *resourceSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	timeout := time.Duration(0)
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	opts.Watch = true
	req := c.client.Get().
		Resource("resourcesets").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout)
	return req.Watch(ctx)
}
// Create POSTs the given resourceSet and decodes the server's copy into result.
func (c *resourceSets) Create(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.CreateOptions) (result *v1alpha1.ResourceSet, err error) {
	result = &v1alpha1.ResourceSet{}
	req := c.client.Post().
		Resource("resourcesets").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(resourceSet)
	err = req.Do(ctx).Into(result)
	return result, err
}
// Update PUTs the given resourceSet (addressed by its own Name) and decodes
// the server's copy into result.
func (c *resourceSets) Update(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.UpdateOptions) (result *v1alpha1.ResourceSet, err error) {
	result = &v1alpha1.ResourceSet{}
	req := c.client.Put().
		Resource("resourcesets").
		Name(resourceSet.Name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(resourceSet)
	err = req.Do(ctx).Into(result)
	return result, err
}
// UpdateStatus PUTs the given resourceSet against the "status" subresource.
// Generated because ResourceSet has a Status member; a +genclient:noStatus
// comment on the type would suppress it.
func (c *resourceSets) UpdateStatus(ctx context.Context, resourceSet *v1alpha1.ResourceSet, opts v1.UpdateOptions) (result *v1alpha1.ResourceSet, err error) {
	result = &v1alpha1.ResourceSet{}
	req := c.client.Put().
		Resource("resourcesets").
		Name(resourceSet.Name).
		SubResource("status").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(resourceSet)
	err = req.Do(ctx).Into(result)
	return result, err
}
// Delete issues a DELETE for the named resourceSet, sending opts as the request body.
func (c *resourceSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	req := c.client.Delete().
		Resource("resourcesets").
		Name(name).
		Body(&opts)
	return req.Do(ctx).Error()
}
// DeleteCollection issues a DELETE over the resourcesets collection selected
// by listOpts, honoring listOpts.TimeoutSeconds as a client-side timeout.
func (c *resourceSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	timeout := time.Duration(0)
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	req := c.client.Delete().
		Resource("resourcesets").
		VersionedParams(&listOpts, scheme.ParameterCodec).
		Timeout(timeout).
		Body(&opts)
	return req.Do(ctx).Error()
}
// Patch sends data with patch type pt to the named resourceSet (optionally a
// subresource) and decodes the patched object into result.
func (c *resourceSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceSet, err error) {
	result = &v1alpha1.ResourceSet{}
	req := c.client.Patch(pt).
		Resource("resourcesets").
		Name(name).
		SubResource(subresources...).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(data)
	err = req.Do(ctx).Into(result)
	return result, err
}
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/BUILD.bazel
================================================
# Build rule for the generated registry/v1alpha1 typed client library.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "generated_expansion.go",
        "registry_client.go",
        "robot.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/registry/v1alpha1",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned/scheme:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_apimachinery//pkg/watch:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/fake/BUILD.bazel
================================================
# Build rule for the generated registry/v1alpha1 fake client library (tests only
# depend on it via the public visibility; it wraps k8s client-go testing fakes).
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "fake_registry_client.go",
        "fake_robot.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/registry/v1alpha1/fake",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "//src/go/pkg/client/versioned/typed/registry/v1alpha1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/labels:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_apimachinery//pkg/watch:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//testing:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/fake/doc.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/fake/fake_registry_client.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/typed/registry/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeRegistryV1alpha1 is a fake implementation of the registry/v1alpha1
// client group, dispatching every call to the embedded testing.Fake instead
// of a real API server.
type FakeRegistryV1alpha1 struct {
	*testing.Fake
}
// Robots returns a fake client for Robot resources scoped to the given namespace.
func (c *FakeRegistryV1alpha1) Robots(namespace string) v1alpha1.RobotInterface {
	robots := &FakeRobots{Fake: c, ns: namespace}
	return robots
}
// RESTClient returns a typed nil *rest.RESTClient to satisfy the interface;
// the fake client routes all calls through testing.Fake rather than REST.
func (c *FakeRegistryV1alpha1) RESTClient() rest.Interface {
	return (*rest.RESTClient)(nil)
}
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/fake/fake_robot.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeRobots implements RobotInterface
type FakeRobots struct {
	// Fake dispatches recorded actions to the shared object tracker.
	Fake *FakeRegistryV1alpha1
	// ns is the namespace every action from this client is scoped to.
	ns string
}

// robotsResource is the GroupVersionResource stamped on every action this fake records.
var robotsResource = schema.GroupVersionResource{Group: "registry.cloudrobotics.com", Version: "v1alpha1", Resource: "robots"}

// robotsKind is the GroupVersionKind used when constructing list actions.
var robotsKind = schema.GroupVersionKind{Group: "registry.cloudrobotics.com", Version: "v1alpha1", Kind: "Robot"}
// Get records a namespaced get action for the named robot and returns the
// object produced by the fake's reaction chain.
func (c *FakeRobots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Robot, err error) {
	action := testing.NewGetAction(robotsResource, c.ns, name)
	ret, err := c.Fake.Invokes(action, &v1alpha1.Robot{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.Robot), err
}
// List records a namespaced list action and returns the tracker's Robots
// filtered by the label selector in opts. Only the label selector is applied;
// the field selector is discarded.
func (c *FakeRobots) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RobotList, err error) {
	ret, err := c.Fake.Invokes(testing.NewListAction(robotsResource, robotsKind, c.ns, opts), &v1alpha1.RobotList{})
	if ret == nil {
		return nil, err
	}
	selector, _, _ := testing.ExtractFromListOptions(opts)
	if selector == nil {
		selector = labels.Everything()
	}
	src := ret.(*v1alpha1.RobotList)
	filtered := &v1alpha1.RobotList{ListMeta: src.ListMeta}
	for _, item := range src.Items {
		if selector.Matches(labels.Set(item.Labels)) {
			filtered.Items = append(filtered.Items, item)
		}
	}
	return filtered, err
}
// Watch returns a watch.Interface that watches the requested robots.
func (c *FakeRobots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	action := testing.NewWatchAction(robotsResource, c.ns, opts)
	return c.Fake.InvokesWatch(action)
}
// Create records a namespaced create action for the given robot and returns
// the object produced by the fake's reaction chain.
func (c *FakeRobots) Create(ctx context.Context, robot *v1alpha1.Robot, opts v1.CreateOptions) (result *v1alpha1.Robot, err error) {
	action := testing.NewCreateAction(robotsResource, c.ns, robot)
	ret, err := c.Fake.Invokes(action, &v1alpha1.Robot{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.Robot), err
}
// Update records a namespaced update action for the given robot and returns
// the object produced by the fake's reaction chain.
func (c *FakeRobots) Update(ctx context.Context, robot *v1alpha1.Robot, opts v1.UpdateOptions) (result *v1alpha1.Robot, err error) {
	action := testing.NewUpdateAction(robotsResource, c.ns, robot)
	ret, err := c.Fake.Invokes(action, &v1alpha1.Robot{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.Robot), err
}
// UpdateStatus records a namespaced update action against the "status"
// subresource. Generated because Robot has a Status member; a
// +genclient:noStatus comment on the type would suppress it.
func (c *FakeRobots) UpdateStatus(ctx context.Context, robot *v1alpha1.Robot, opts v1.UpdateOptions) (*v1alpha1.Robot, error) {
	action := testing.NewUpdateSubresourceAction(robotsResource, "status", c.ns, robot)
	ret, err := c.Fake.Invokes(action, &v1alpha1.Robot{})
	if ret == nil {
		return nil, err
	}
	return ret.(*v1alpha1.Robot), err
}
// Delete records a namespaced delete action for the named robot.
func (c *FakeRobots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	action := testing.NewDeleteAction(robotsResource, c.ns, name)
	_, err := c.Fake.Invokes(action, &v1alpha1.Robot{})
	return err
}
// DeleteCollection records a namespaced delete-collection action for robots.
func (c *FakeRobots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	_, err := c.Fake.Invokes(testing.NewDeleteCollectionAction(robotsResource, c.ns, listOpts), &v1alpha1.RobotList{})
	return err
}
// Patch applies the patch and returns the patched robot.
// A patch action (optionally targeting subresources) is recorded on c.Fake;
// when the invoked reaction yields no object, only the error is returned.
func (c *FakeRobots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Robot, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(robotsResource, c.ns, name, pt, data, subresources...), &v1alpha1.Robot{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.Robot), err
}
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/generated_expansion.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type RobotExpansion interface{}
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/registry_client.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// RegistryV1alpha1Interface is the client surface for the
// registry.cloudrobotics.com/v1alpha1 API group, exposing the raw REST
// client plus typed accessors for each resource (currently only Robots).
type RegistryV1alpha1Interface interface {
	RESTClient() rest.Interface
	RobotsGetter
}
// RegistryV1alpha1Client is used to interact with features provided by the registry.cloudrobotics.com group.
type RegistryV1alpha1Client struct {
	restClient rest.Interface // underlying REST client shared by all typed accessors
}

// Robots returns a RobotInterface scoped to the given namespace.
func (c *RegistryV1alpha1Client) Robots(namespace string) RobotInterface {
	return newRobots(c, namespace)
}
// NewForConfig creates a new RegistryV1alpha1Client for the given config.
// The input config is copied before defaults (group version, API path,
// serializer, user agent) are applied, so the caller's config is not mutated.
func NewForConfig(c *rest.Config) (*RegistryV1alpha1Client, error) {
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &RegistryV1alpha1Client{client}, nil
}
// NewForConfigOrDie creates a new RegistryV1alpha1Client for the given config and
// panics if there is an error in the config.
// Intended for callers (e.g. initialization code) that cannot recover anyway.
func NewForConfigOrDie(c *rest.Config) *RegistryV1alpha1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}
// New creates a new RegistryV1alpha1Client for the given RESTClient.
// The caller is responsible for having configured c appropriately
// (no defaults are applied here).
func New(c rest.Interface) *RegistryV1alpha1Client {
	return &RegistryV1alpha1Client{c}
}
// setConfigDefaults fills in the REST config fields required to talk to the
// registry.cloudrobotics.com/v1alpha1 API: group version, the "/apis" path,
// the scheme's negotiated serializer, and a default user agent if unset.
func setConfigDefaults(config *rest.Config) error {
	gv := v1alpha1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// A nil receiver yields a nil interface, making the method safe to call on
// an uninitialized client.
func (c *RegistryV1alpha1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
================================================
FILE: src/go/pkg/client/versioned/typed/registry/v1alpha1/robot.go
================================================
// Copyright 2026 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
scheme "github.com/googlecloudrobotics/core/src/go/pkg/client/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// RobotsGetter has a method to return a RobotInterface.
// A group's client should implement this interface.
type RobotsGetter interface {
	Robots(namespace string) RobotInterface
}
// RobotInterface has methods to work with Robot resources.
// It mirrors the standard Kubernetes typed-client verb set; RobotExpansion is
// embedded as an extension point for hand-written methods.
type RobotInterface interface {
	Create(ctx context.Context, robot *v1alpha1.Robot, opts v1.CreateOptions) (*v1alpha1.Robot, error)
	Update(ctx context.Context, robot *v1alpha1.Robot, opts v1.UpdateOptions) (*v1alpha1.Robot, error)
	UpdateStatus(ctx context.Context, robot *v1alpha1.Robot, opts v1.UpdateOptions) (*v1alpha1.Robot, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Robot, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RobotList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Robot, err error)
	RobotExpansion
}
// robots implements RobotInterface
type robots struct {
	client rest.Interface // REST client for the registry API group
	ns     string         // namespace all requests are scoped to
}

// newRobots returns a Robots
// bound to the given namespace, reusing the parent client's REST client.
func newRobots(c *RegistryV1alpha1Client, namespace string) *robots {
	return &robots{
		client: c.RESTClient(),
		ns:     namespace,
	}
}
// Get takes name of the robot, and returns the corresponding robot object, and an error if there is any.
// Issues a namespaced GET on the "robots" resource and decodes the response.
func (c *robots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Robot, err error) {
	result = &v1alpha1.Robot{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("robots").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do(ctx).
		Into(result)
	return
}
// List takes label and field selectors, and returns the list of Robots that match those selectors.
// opts.TimeoutSeconds, when set, bounds the server-side request duration.
func (c *robots) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RobotList, err error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	result = &v1alpha1.RobotList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("robots").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Do(ctx).
		Into(result)
	return
}
// Watch returns a watch.Interface that watches the requested robots.
// opts.Watch is forced to true so the server streams events instead of
// returning a one-shot list.
func (c *robots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("robots").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Watch(ctx)
}
// Create takes the representation of a robot and creates it. Returns the server's representation of the robot, and an error, if there is any.
// Issues a namespaced POST with the robot as the request body.
func (c *robots) Create(ctx context.Context, robot *v1alpha1.Robot, opts v1.CreateOptions) (result *v1alpha1.Robot, err error) {
	result = &v1alpha1.Robot{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("robots").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(robot).
		Do(ctx).
		Into(result)
	return
}
// Update takes the representation of a robot and updates it. Returns the server's representation of the robot, and an error, if there is any.
// Issues a namespaced PUT addressed by robot.Name with the robot as the body.
func (c *robots) Update(ctx context.Context, robot *v1alpha1.Robot, opts v1.UpdateOptions) (result *v1alpha1.Robot, err error) {
	result = &v1alpha1.Robot{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("robots").
		Name(robot.Name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(robot).
		Do(ctx).
		Into(result)
	return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
// Issues a PUT against the "status" subresource of the named robot.
func (c *robots) UpdateStatus(ctx context.Context, robot *v1alpha1.Robot, opts v1.UpdateOptions) (result *v1alpha1.Robot, err error) {
	result = &v1alpha1.Robot{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("robots").
		Name(robot.Name).
		SubResource("status").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(robot).
		Do(ctx).
		Into(result)
	return
}
// Delete takes name of the robot and deletes it. Returns an error if one occurs.
// The DeleteOptions travel in the request body, per Kubernetes convention.
func (c *robots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("robots").
		Name(name).
		Body(&opts).
		Do(ctx).
		Error()
}
// DeleteCollection deletes a collection of objects.
// listOpts selects which robots to delete (and may bound the request via
// TimeoutSeconds); opts travel in the request body.
func (c *robots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	var timeout time.Duration
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("robots").
		VersionedParams(&listOpts, scheme.ParameterCodec).
		Timeout(timeout).
		Body(&opts).
		Do(ctx).
		Error()
}
// Patch applies the patch and returns the patched robot.
// pt selects the patch strategy; data is the raw patch document, and
// subresources (e.g. "status") may narrow the target.
func (c *robots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Robot, err error) {
	result = &v1alpha1.Robot{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("robots").
		Name(name).
		SubResource(subresources...).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(data).
		Do(ctx).
		Into(result)
	return
}
================================================
FILE: src/go/pkg/configutil/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

# Library for reading the per-project config.sh from Google Cloud Storage
# and parsing its shell-style variable assignments.
go_library(
    name = "go_default_library",
    srcs = ["config_reader.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/configutil",
    deps = [
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_google_cloud_go_storage//:go_default_library",
        "@org_golang_google_api//option:go_default_library",
    ],
)

# Unit tests for the parsing helpers; they operate on in-memory strings only.
go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["config_reader_test.go"],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)
================================================
FILE: src/go/pkg/configutil/config_reader.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configutil
import (
"bufio"
"context"
"io"
"log/slog"
"regexp"
"strconv"
"strings"
"cloud.google.com/go/storage"
"github.com/googlecloudrobotics/ilog"
"google.golang.org/api/option"
)
// bashUnescape undoes bash quoting for a single token. Only the following
// three patterns are supported:
//
//	Hello\ \w\o\r\l\d      -> Hello world
//	"Hello \"world\""      -> Hello "world"
//	'Hello '\''world'\''' -> Hello 'world'
func bashUnescape(s string) string {
	if len(s) <= 1 {
		return s
	}
	first, last := s[0], s[len(s)-1]
	switch {
	case first == '\'' && last == '\'':
		// Single quotes: the only escape is the '\'' idiom for a literal quote.
		return strings.ReplaceAll(s[1:len(s)-1], `'\''`, `'`)
	case first == '"' && last == '"':
		// Double quotes: a backslash escapes \\, \$, \", \`, and \!.
		inner := s[1 : len(s)-1]
		return regexp.MustCompile("\\\\([\\$\"`!\\\\])").ReplaceAllString(inner, "$1")
	default:
		// Unquoted: a backslash escapes any single following character.
		return regexp.MustCompile(`\\(.)`).ReplaceAllString(s, "$1")
	}
}
// getConfigFromReader parses shell-style NAME=value assignments from reader,
// one per line, returning them as a map. Later assignments overwrite earlier
// ones; lines that do not match the pattern (comments, lines with whitespace
// around '=') are silently skipped. Values are bash-unescaped.
func getConfigFromReader(reader io.Reader) (map[string]string, error) {
	assignment := regexp.MustCompile(`^\s*([\w]*)=(.*?)\s*$`)
	vars := map[string]string{}
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		m := assignment.FindStringSubmatch(scanner.Text())
		if len(m) < 3 {
			continue
		}
		// TODO(skopecki) Consider allowing variable substitution (e.g., FOOBAR="${FOO}/bar")
		vars[m[1]] = bashUnescape(m[2])
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return vars, nil
}
// setDefaultVars fills in defaults for config variables that were not set
// explicitly. Keep default in sync with scripts/include-config.sh.
func setDefaultVars(vars map[string]string) {
	const registryKey = "CLOUD_ROBOTICS_CONTAINER_REGISTRY"
	if vars[registryKey] == "" {
		vars[registryKey] = "gcr.io/" + vars["GCP_PROJECT_ID"]
	}
}
// ReadConfig reads the config.sh from the cloud storage of the given project.
// The bucket name is "<project>-cloud-robotics-config"; opts are forwarded to
// the storage client (e.g. to inject credentials or a test endpoint).
// All variables specified in the config are returned as dictionary.
// Uses the following defaults if the variables are not set:
//
//	CLOUD_ROBOTICS_CONTAINER_REGISTRY="gcr.io/${GCP_PROJECT_ID}"
func ReadConfig(project string, opts ...option.ClientOption) (map[string]string, error) {
	ctx := context.Background()
	client, err := storage.NewClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	// Close the client when done; it was previously leaked on every call.
	defer client.Close()
	bkt := client.Bucket(project + "-cloud-robotics-config")
	reader, err := bkt.Object("config.sh").NewReader(ctx)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	vars, err := getConfigFromReader(reader)
	if err != nil {
		return nil, err
	}
	setDefaultVars(vars)
	return vars, nil
}
// GetBoolean returns the config value for key parsed as a boolean. If the key
// is absent, or the value cannot be parsed by strconv.ParseBool, the default
// def is returned; parse failures are additionally logged.
func GetBoolean(vars map[string]string, key string, def bool) bool {
	val, ok := vars[key]
	if !ok {
		return def
	}
	b, err := strconv.ParseBool(val)
	if err != nil {
		slog.Error("failed to convert config to boolean",
			slog.String("Key", key),
			slog.String("Value", val),
			ilog.Err(err))
		return def
	}
	return b
}
================================================
FILE: src/go/pkg/configutil/config_reader_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configutil
import (
"reflect"
"strings"
"testing"
)
// TestBashUnescape covers the three supported quoting styles plus an input
// with mismatched quotes, which falls through to plain backslash unescaping.
func TestBashUnescape(t *testing.T) {
	cases := []struct {
		in, want string
	}{
		{in: `foo\ \b\a\r\!`, want: `foo bar!`},
		{in: `"foo\ \"bar\"\!"`, want: `foo\ "bar"!`},
		{in: `'foo\ '\''bar'\''\!'`, want: `foo\ 'bar'\!`},
		{in: `"foo\ \b\a\r\!'`, want: `"foo bar!'`},
	}
	for i, tc := range cases {
		got := bashUnescape(tc.in)
		if got != tc.want {
			t.Errorf("[%d] bashUnescape(`%s`) = `%s`', want `%s`", i, tc.in, got, tc.want)
		}
	}
}
// TestGetConfigFromReader verifies that valid assignments are parsed, that a
// later assignment overrides an earlier one, and that commented or malformed
// lines are ignored.
func TestGetConfigFromReader(t *testing.T) {
	input := `
FOO="foo"
FOO_BAR1=foo\ bar
BAR=buzz
BAR=bar
# NOPE1="don't take this"
NOPE2 = "or this"`
	want := map[string]string{
		"FOO":      "foo",
		"FOO_BAR1": "foo bar",
		"BAR":      "bar",
	}
	got, err := getConfigFromReader(strings.NewReader(input))
	if err != nil {
		t.Fatalf("Got error while reading config: %v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("getConfigFromReader(`%s`) =\n%q\n\nwant:\n%q", input, got, want)
	}
}
// TestSetDefaultVars checks that the container-registry default is derived
// from the project id when absent, and that an explicit value is untouched.
func TestSetDefaultVars(t *testing.T) {
	cases := []struct {
		in   map[string]string
		want map[string]string
	}{
		{
			// Default derived from GCP_PROJECT_ID.
			in: map[string]string{"GCP_PROJECT_ID": "foo"},
			want: map[string]string{
				"GCP_PROJECT_ID":                    "foo",
				"CLOUD_ROBOTICS_CONTAINER_REGISTRY": "gcr.io/foo",
			},
		},
		{
			// Explicit value wins over the default.
			in: map[string]string{
				"GCP_PROJECT_ID":                    "foo",
				"CLOUD_ROBOTICS_CONTAINER_REGISTRY": "gcr.io/bar",
			},
			want: map[string]string{
				"GCP_PROJECT_ID":                    "foo",
				"CLOUD_ROBOTICS_CONTAINER_REGISTRY": "gcr.io/bar",
			},
		},
	}
	for i, tc := range cases {
		setDefaultVars(tc.in)
		if !reflect.DeepEqual(tc.in, tc.want) {
			t.Errorf("[%d], got: %v\nwant %v", i, tc.in, tc.want)
		}
	}
}
// TestGetBoolean exercises the present/absent/unparseable value cases of
// GetBoolean, always querying the key "FLAG".
func TestGetBoolean(t *testing.T) {
	cases := []struct {
		vars map[string]string
		def  bool
		want bool
	}{
		// Parseable value present: value wins over default.
		{vars: map[string]string{"FLAG": "true"}, def: false, want: true},
		// Empty map: default is returned.
		{vars: map[string]string{}, def: false, want: false},
		// Different key present: default is returned.
		{vars: map[string]string{"OPTION": "true"}, def: false, want: false},
		// Key present but unparseable: default is returned.
		{vars: map[string]string{"FLAG": "I am not a flag"}, def: false, want: false},
	}
	for i, tc := range cases {
		got := GetBoolean(tc.vars, "FLAG", tc.def)
		if got != tc.want {
			t.Errorf("[%d] got: %v\nwant %v", i, got, tc.want)
		}
	}
}
================================================
FILE: src/go/pkg/controller/approllout/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

# Controller that expands AppRollout resources into per-robot
# ChartAssignments (see controller.go).
go_library(
    name = "go_default_library",
    srcs = ["controller.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/controller/approllout",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/api/validation:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/labels:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/serializer:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_client_go//util/workqueue:go_default_library",
        "@io_k8s_helm//pkg/chartutil:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/client:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/controller:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/event:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/handler:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/manager:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/reconcile:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/source:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/webhook/admission:go_default_library",
        "@io_k8s_sigs_yaml//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["controller_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/apis/registry/v1alpha1:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_helm//pkg/chartutil:go_default_library",
        "@io_k8s_sigs_yaml//:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/controller/approllout/controller.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package approllout
import (
"bytes"
"context"
"fmt"
"log/slog"
"net/http"
"reflect"
"sort"
"strings"
"time"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
registry "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"k8s.io/helm/pkg/chartutil"
kclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"sigs.k8s.io/yaml"
)
const (
fieldIndexOwners = "metadata.ownerReferences.uid"
fieldIndexAppName = "spec.appName"
labelRobotName = "cloudrobotics.com/robot-name"
)
// Add adds a controller for the AppRollout resource type
// to the manager and server.
//
// It registers two cache indexes (ChartAssignments by owner UID, AppRollouts
// by spec.appName) and four watches: AppRollouts themselves, ChartAssignments
// (mapped back to their owning AppRollout), Robots (any relevant change
// re-enqueues all AppRollouts), and Apps (re-enqueues AppRollouts that
// reference the App by name). baseValues is stored on the Reconciler and
// passed along when rollouts are expanded into ChartAssignments.
func Add(ctx context.Context, mgr manager.Manager, baseValues chartutil.Values) error {
	r := &Reconciler{
		kube:       mgr.GetClient(),
		baseValues: baseValues,
	}
	c, err := controller.New("approllout", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return errors.Wrap(err, "create controller")
	}
	// Indexes used by enqueueForOwner/enqueueForApp and the reconcile loop.
	err = mgr.GetCache().IndexField(ctx, &apps.ChartAssignment{}, fieldIndexOwners, indexOwnerReferences)
	if err != nil {
		return errors.Wrap(err, "add field indexer")
	}
	err = mgr.GetCache().IndexField(ctx, &apps.AppRollout{}, fieldIndexAppName, indexAppName)
	if err != nil {
		return errors.Wrap(err, "add field indexer")
	}
	err = c.Watch(
		source.Kind(mgr.GetCache(), &apps.AppRollout{}),
		&handler.EnqueueRequestForObject{},
	)
	if err != nil {
		return errors.Wrap(err, "watch AppRollouts")
	}
	// We don't trigger on ChartAssignment creations since it was either ourselves
	// or a CA we don't care about anyway.
	err = c.Watch(
		source.Kind(mgr.GetCache(), &apps.ChartAssignment{}),
		// We manually enqueue for the owner reference since handler.EnqueueRequestForOwner
		// does not work.
		// TODO: There is an associated bug in the controller-runtime but upgrading to include
		// https://github.com/kubernetes-sigs/controller-runtime/pull/274 did not resolve the issue.
		&handler.Funcs{
			DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
				r.enqueueForOwner(evt.Object, q)
			},
			UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
				r.enqueueForOwner(evt.ObjectNew, q)
			},
		},
	)
	if err != nil {
		return errors.Wrap(err, "watch ChartAssignments")
	}
	// Determining which rollouts are affected by a robot change is tedious.
	// We just enqueue all AppRollouts again.
	err = c.Watch(
		source.Kind(mgr.GetCache(), &registry.Robot{}),
		// We log robot events for now while b/125308238 persists.
		// To mitigate the effects we defer enqueueing in the delete handler
		// so the robot ideally reappeared before we reconcile.
		&handler.Funcs{
			CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
				slog.Info("AppRollout controller received create event", slog.String("Robot", e.Object.GetName()))
				r.enqueueAll(ctx, q)
			},
			UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
				// Robots don't have the status subresource enabled. Filter updates that didn't
				// change robot name or labels.
				change := !reflect.DeepEqual(e.ObjectOld.GetLabels(), e.ObjectNew.GetLabels())
				change = change || e.ObjectOld.GetName() != e.ObjectNew.GetName()
				if change {
					slog.Info("AppRollout controller received update event", slog.String("Robot", e.ObjectNew.GetName()))
					r.enqueueAll(ctx, q)
				}
			},
			DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
				slog.Info("AppRollout controller received delete event", slog.String("Robot", e.Object.GetName()))
				// Delay so a robot that briefly disappears can come back
				// before the rollouts are reconciled.
				time.AfterFunc(3*time.Second, func() {
					r.enqueueAll(ctx, q)
				})
			},
		},
	)
	if err != nil {
		return errors.Wrap(err, "watch Robots")
	}
	err = c.Watch(
		source.Kind(mgr.GetCache(), &apps.App{}),
		&handler.Funcs{
			CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
				slog.Info("AppRollout controller received create event", slog.String("App", e.Object.GetName()))
				r.enqueueForApp(ctx, e.Object, q)
			},
			UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
				slog.Info("AppRollout controller received update event", slog.String("App", e.ObjectNew.GetName()))
				r.enqueueForApp(ctx, e.ObjectNew, q)
			},
			DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
				slog.Info("AppRollout controller received delete event", slog.String("App", e.Object.GetName()))
				r.enqueueForApp(ctx, e.Object, q)
			},
		},
	)
	if err != nil {
		return errors.Wrap(err, "watch Apps")
	}
	return nil
}
// enqueueForApp enqueues a reconcile request for every AppRollout whose
// spec.appName matches the name of the given App object. Listing errors are
// logged and otherwise ignored.
func (r *Reconciler) enqueueForApp(ctx context.Context, m metav1.Object, q workqueue.RateLimitingInterface) {
	appName := m.GetName()
	var list apps.AppRolloutList
	if err := r.kube.List(ctx, &list, kclient.MatchingFields(map[string]string{fieldIndexAppName: appName})); err != nil {
		slog.Error("List AppRollouts failed", slog.String("appName", appName), ilog.Err(err))
		return
	}
	for i := range list.Items {
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: list.Items[i].Name}})
	}
}
// enqueueForOwner enqueues a reconcile request for every AppRollout listed in
// the owner references of the given resource metadata; other owner kinds are
// skipped.
func (r *Reconciler) enqueueForOwner(m metav1.Object, q workqueue.RateLimitingInterface) {
	for _, ref := range m.GetOwnerReferences() {
		if ref.APIVersion != "apps.cloudrobotics.com/v1alpha1" || ref.Kind != "AppRollout" {
			continue
		}
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ref.Name}})
	}
}
// enqueueAll enqueues a reconcile request for every AppRollout in the
// cluster. Listing errors are logged and otherwise ignored.
func (r *Reconciler) enqueueAll(ctx context.Context, q workqueue.RateLimitingInterface) {
	var list apps.AppRolloutList
	if err := r.kube.List(ctx, &list); err != nil {
		slog.Error("List AppRollouts failed", ilog.Err(err))
		return
	}
	for i := range list.Items {
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: list.Items[i].Name}})
	}
}
// Reconciler provides an idempotent function that brings the cluster into a
// state consistent with the specification of an AppRollout.
type Reconciler struct {
	kube       kclient.Client   // client used for all reads and writes in the reconcile loop
	baseValues chartutil.Values // base Helm values passed to generateChartAssignments
}
// Reconcile fetches the AppRollout named by req and delegates to reconcile.
// A missing rollout (already deleted) is treated as success.
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	var ar apps.AppRollout
	switch err := r.kube.Get(ctx, req.NamespacedName, &ar); {
	case k8serrors.IsNotFound(err):
		// Nothing left to do for a deleted AppRollout.
		return reconcile.Result{}, nil
	case err != nil:
		return reconcile.Result{}, errors.Wrapf(err, "get AppRollout %q", req)
	}
	return r.reconcile(ctx, &ar)
}
// reconcile brings the cluster in line with the given AppRollout: it computes
// the desired set of ChartAssignments from the matching Apps and Robots,
// creates/updates/deletes ChartAssignments to match, and writes the rollout's
// status. Counters in ar.Status are reset here and recomputed by setStatus.
func (r *Reconciler) reconcile(ctx context.Context, ar *apps.AppRollout) (reconcile.Result, error) {
	slog.Info("Reconcile AppRollout",
		slog.String("Name", ar.Name),
		slog.String("Version", ar.ResourceVersion))
	// Apply spec.
	var (
		curCAs apps.ChartAssignmentList
		al     apps.AppList
		robots registry.RobotList
	)
	ar.Status.ObservedGeneration = ar.Generation
	ar.Status.Assignments = 0
	ar.Status.SettledAssignments = 0
	ar.Status.ReadyAssignments = 0
	ar.Status.FailedAssignments = 0
	// TODO(coconutruben): consider moving these into a testable function.
	// Moving them into generateChartAssignments requires rewriting the
	// existing tests.
	// Existing CAs owned by this rollout are found via the owner-UID index.
	err := r.kube.List(ctx, &curCAs, kclient.MatchingFields(map[string]string{fieldIndexOwners: string(ar.UID)}))
	if err != nil {
		return reconcile.Result{}, errors.Wrapf(err, "list ChartAssignments for owner UID %s", ar.UID)
	}
	if err := r.kube.List(ctx, &robots); err != nil {
		return reconcile.Result{}, errors.Wrap(err, "list all Robots")
	}
	if err := r.kube.List(ctx, &al, kclient.MatchingLabels{labelAppName: ar.Spec.AppName}); err != nil {
		return reconcile.Result{}, errors.Wrap(err, "list all App Versions")
	}
	// There might be old Apps laying around that do not conform to this
	// methodology yet. However, those Apps also do not use the version
	// mechanism. Therefore, we can just look for that App once, and put
	// it into our map if it's not there yet.
	appFound := func(name string) bool {
		for idx := range al.Items {
			if al.Items[idx].Name == name {
				return true
			}
		}
		return false
	}
	if !appFound(ar.Spec.AppName) {
		// We might not need the canonical app at all, if all the rollout
		// entries are asking for versioned apps. Do not fail here yet,
		// if we needed it, it will fail in generateChartAssignments
		app := apps.App{}
		if err := r.kube.Get(ctx, kclient.ObjectKey{Name: ar.Spec.AppName}, &app); err == nil {
			// If we're here, the App is both:
			// - app.Name == ar.Spec.AppName.
			// - not found using the app-name label. This object is a copy so
			//   we can just write the label in there, so generateChartAssignments
			//   can safely assume the label is always there.
			if app.Labels == nil {
				app.Labels = map[string]string{}
			}
			app.Labels[labelAppName] = app.Name
			app.Labels[labelAppVersion] = ""
			al.Items = append(al.Items, app)
		}
	}
	wantCAs, err := generateChartAssignments(al.Items, robots.Items, ar, r.baseValues)
	if err != nil {
		// Overlapping robot selectors are reported via the status condition
		// rather than as a reconcile error.
		if _, ok := errors.Cause(err).(errRobotSelectorOverlap); ok {
			return reconcile.Result{}, r.updateErrorStatus(ctx, ar, err.Error())
		}
		return reconcile.Result{}, errors.Wrap(err, "generate ChartAssignments")
	}
	// ChartAssignments that are no longer wanted. We pre-populate it with
	// all existing CAs and remove those that we want to keep
	dropCAs := map[string]apps.ChartAssignment{}
	for _, ca := range curCAs.Items {
		dropCAs[ca.Name] = ca
	}
	// Create or update ChartAssignments. Only update ChartAssignments if the rollout's
	// spec or labels have been updated.
	for _, ca := range wantCAs {
		_true := true
		// Mark the rollout as controlling owner so CAs are garbage-collected
		// with it and can be mapped back by enqueueForOwner.
		setOwnerReference(&ca.ObjectMeta, metav1.OwnerReference{
			APIVersion:         ar.APIVersion,
			Kind:               ar.Kind,
			Name:               ar.Name,
			UID:                ar.UID,
			BlockOwnerDeletion: &_true,
			Controller:         &_true,
		})
		prev, exists := dropCAs[ca.Name]
		delete(dropCAs, ca.Name)
		if !exists {
			if err := r.kube.Create(ctx, ca); err != nil {
				return reconcile.Result{}, errors.Wrapf(err, "create ChartAssignment %q", ca.Name)
			}
			slog.Info("Created ChartAssignment", slog.String("Name", ca.Name))
			continue
		}
		if changed, err := chartAssignmentChanged(&prev, ca); err != nil {
			return reconcile.Result{}, errors.Wrap(err, "check ChartAssignment changed")
		} else if !changed {
			continue
		}
		ca.ResourceVersion = prev.ResourceVersion
		if err := r.kube.Update(ctx, ca); err != nil {
			return reconcile.Result{}, errors.Wrapf(err, "update ChartAssignment %q", ca.Name)
		}
		slog.Info("Updated ChartAssignment", slog.String("Name", ca.Name))
	}
	// Delete obsolete assignments.
	for _, ca := range dropCAs {
		if err := r.kube.Delete(ctx, &ca); err != nil {
			return reconcile.Result{}, errors.Wrapf(err, "delete ChartAssignment %q", ca.Name)
		}
		slog.Info("Deleted ChartAssignment", slog.String("Name", ca.Name))
	}
	// NOTE(review): status counters are derived from curCAs as listed before
	// the create/update/delete pass above, so they may lag by one reconcile.
	setStatus(ar, len(wantCAs), curCAs.Items)
	if err := r.kube.Status().Update(ctx, ar); err != nil {
		return reconcile.Result{}, errors.Wrap(err, "update status")
	}
	return reconcile.Result{}, nil
}
// updateErrorStatus marks the rollout's Settled condition as False with the
// given message and persists the new status.
//
// Note: errors.Wrap returns nil when the wrapped error is nil, so a
// successful status update yields a nil return.
func (r *Reconciler) updateErrorStatus(ctx context.Context, ar *apps.AppRollout, msg string) error {
	setCondition(ar, apps.AppRolloutConditionSettled, core.ConditionFalse, msg)
	return errors.Wrap(r.kube.Status().Update(ctx, ar), "update status")
}
// setStatus recomputes the rollout's status counters and conditions from the
// desired number of ChartAssignments and the currently existing ones.
//
// The per-phase counters are explicitly reset before counting: ar is
// typically fetched from the API server and may still carry counts from a
// previous reconciliation, which would otherwise accumulate on every call
// (Assignments was assigned with `=` but the phase counters used `++`).
func setStatus(ar *apps.AppRollout, numWantCAs int, curCAs []apps.ChartAssignment) {
	ar.Status.Assignments = int64(numWantCAs)
	ar.Status.ReadyAssignments = 0
	ar.Status.SettledAssignments = 0
	ar.Status.FailedAssignments = 0
	for _, ca := range curCAs {
		switch ca.Status.Phase {
		case apps.ChartAssignmentPhaseReady:
			// Ready implies settled, so it counts towards both.
			ar.Status.ReadyAssignments++
			ar.Status.SettledAssignments++
		case apps.ChartAssignmentPhaseSettled:
			ar.Status.SettledAssignments++
		case apps.ChartAssignmentPhaseFailed:
			ar.Status.FailedAssignments++
		}
	}
	if got, want := ar.Status.SettledAssignments, ar.Status.Assignments; got == want {
		setCondition(ar, apps.AppRolloutConditionSettled, core.ConditionTrue, "")
	} else {
		setCondition(ar, apps.AppRolloutConditionSettled, core.ConditionFalse,
			fmt.Sprintf("%d/%d ChartAssignments settled", got, want))
	}
	if got, want := ar.Status.ReadyAssignments, ar.Status.Assignments; got == want {
		setCondition(ar, apps.AppRolloutConditionReady, core.ConditionTrue, "")
	} else {
		setCondition(ar, apps.AppRolloutConditionReady, core.ConditionFalse,
			fmt.Sprintf("%d/%d ChartAssignments ready", got, want))
	}
}
// setCondition adds or updates a condition; existing entries are matched on
// the Type field. LastUpdateTime is bumped whenever the status or message
// changes; LastTransitionTime only when the status itself flips.
func setCondition(ar *apps.AppRollout, t apps.AppRolloutConditionType, s core.ConditionStatus, msg string) {
	now := metav1.Now()
	for i := range ar.Status.Conditions {
		c := &ar.Status.Conditions[i]
		if c.Type != t {
			continue
		}
		// Update the existing condition in place.
		if c.Status != s || c.Message != msg {
			c.LastUpdateTime = now
		}
		if c.Status != s {
			c.LastTransitionTime = now
		}
		c.Status = s
		c.Message = msg
		return
	}
	// No condition of this type yet: record it for the first time.
	ar.Status.Conditions = append(ar.Status.Conditions, apps.AppRolloutCondition{
		Type:               t,
		LastUpdateTime:     now,
		LastTransitionTime: now,
		Status:             s,
		Message:            msg,
	})
}
// chartAssignmentChanged reports whether the CA's labels, annotations, or
// spec differ between prev and cur. The specs are compared via their YAML
// serialization, which sidesteps awkward map[string]interface{} comparisons
// in the chart values.
func chartAssignmentChanged(prev, cur *apps.ChartAssignment) (bool, error) {
	if !reflect.DeepEqual(prev.Labels, cur.Labels) || !reflect.DeepEqual(prev.Annotations, cur.Annotations) {
		return true, nil
	}
	prevSpec, err := yaml.Marshal(prev.Spec)
	if err != nil {
		return false, err
	}
	curSpec, err := yaml.Marshal(cur.Spec)
	if err != nil {
		return false, err
	}
	return !bytes.Equal(prevSpec, curSpec), nil
}
// errRobotSelectorOverlap is returned when more than one selector of a
// rollout matches the same robot. Its value is the name of that robot.
type errRobotSelectorOverlap string

// Error implements the error interface.
func (r errRobotSelectorOverlap) Error() string {
	return fmt.Sprintf("robot %q was selected multiple times", string(r))
}
// generateChartAssignments returns a list of all cloud and robot ChartAssignments
// for the given app, its rollout, and set of robots.
func generateChartAssignments(
	al []apps.App,
	robots []registry.Robot,
	rollout *apps.AppRollout,
	baseValues chartutil.Values,
) ([]*apps.ChartAssignment, error) {
	var (
		// Different entries might request different app versions. This map
		// is used to only retrieve them once.
		appVersions = map[string]*apps.App{}
		cas         []*apps.ChartAssignment
		// Robots that matched selectors for the rollout and which will be
		// passed to the cloud chart.
		selectedRobots = map[string]*registry.Robot{}
	)
	for i := range al {
		// Index into the slice instead of taking the address of the range
		// variable: before Go 1.22, &app would alias a single loop variable
		// and every map entry would end up pointing at the last element.
		app := &al[i]
		v, ok := app.Labels[labelAppVersion]
		if !ok {
			// If only app-name is defined, it is an unversioned app.
			v = ""
		}
		if _, ok := appVersions[v]; ok {
			slog.Info("App already known. Going to ignore App Object for the same app/version",
				slog.String("Known Name", rollout.Spec.AppName),
				slog.String("Known Version", v),
				slog.String("App Object", app.Name))
		} else {
			// Only add to map if this is the first time we're adding this
			// version for the app. The validator should ensure that this
			// overwrite cannot happen, but apps might be legacy, or have
			// bypassed validation.
			appVersions[v] = app
		}
	}
	for _, rcomp := range rollout.Spec.Robots {
		robots, err := matchingRobots(robots, rcomp.Selector)
		if err != nil {
			return nil, errors.Wrap(err, "select robots")
		}
		// The requested version may be missing, e.g. when the rollout
		// references a version for which no App object exists.
		app, ok := appVersions[rcomp.Version]
		if !ok {
			return nil, fmt.Errorf("no App %q (Version: %q) found", rollout.Spec.AppName, rcomp.Version)
		}
		comps := app.Spec.Components
		for i := range robots {
			// Ensure we don't pass a pointer to the most recent loop item.
			r := &robots[i]
			// No robot must be selected multiple times.
			if _, ok := selectedRobots[r.Name]; ok {
				return nil, errRobotSelectorOverlap(r.Name)
			}
			selectedRobots[r.Name] = r
			if comps.Robot.Name != "" || comps.Robot.Inline != "" {
				cas = append(cas, newRobotChartAssignment(r, app, rollout, &rcomp, baseValues))
			}
		}
	}
	// The cloud has no version, just the canonical version. We might
	// not have it due to no robot using it.
	if app, ok := appVersions[""]; ok {
		comps := app.Spec.Components
		if comps.Cloud.Name != "" || comps.Cloud.Inline != "" {
			// Turn robot map into a sorted slice so we produce deterministic outputs.
			// (Go randomizes map iteration.)
			robots := make([]*registry.Robot, 0, len(selectedRobots))
			for _, r := range selectedRobots {
				robots = append(robots, r)
			}
			sort.Slice(robots, func(i, j int) bool {
				return robots[i].Name < robots[j].Name
			})
			cas = append(cas, newCloudChartAssignment(app, rollout, baseValues, robots...))
		}
	} else if rollout.Spec.Cloud.Values != nil {
		slog.Info("No canonical version of App. There won't be a Cloud ChartAssignment. AppRollout defines cloud values.",
			slog.String("App Name", rollout.Spec.AppName),
			slog.String("Rollout Name", rollout.Name))
	}
	sort.Slice(cas, func(i, j int) bool {
		return cas[i].Name < cas[j].Name
	})
	return cas, nil
}
// newCloudChartAssignment generates a new ChartAssignment for the cloud
// cluster from an app, its rollout, a set of base configuration values,
// and the list of robots matched by the rollout.
func newCloudChartAssignment(
	app *apps.App,
	rollout *apps.AppRollout,
	values chartutil.Values,
	robots ...*registry.Robot,
) *apps.ChartAssignment {
	ca := newBaseChartAssignment(app, rollout, &app.Spec.Components.Cloud)
	ca.Name = chartAssignmentName(rollout.Name, compTypeCloud, "")
	ca.Spec.ClusterName = "cloud"
	// The matched robots are injected into the cloud chart under the
	// reserved "robots" key. Kept as a nil slice when no robots matched so
	// the serialized form is unchanged.
	var robotList []robotValues
	for _, r := range robots {
		robotList = append(robotList, robotValues{Name: r.Name})
	}
	// Merge order: base values < rollout cloud values < injected robots.
	merged := chartutil.Values{}
	merged.MergeInto(values)
	merged.MergeInto(chartutil.Values(rollout.Spec.Cloud.Values))
	merged.MergeInto(chartutil.Values{"robots": robotList})
	ca.Spec.Chart.Values = apps.ConfigValues(merged)
	return ca
}
// newRobotChartAssignment generates a new ChartAssignment for a robot cluster
// from an app, its rollout, and a set of base configuration values.
func newRobotChartAssignment(
	robot *registry.Robot,
	app *apps.App,
	rollout *apps.AppRollout,
	spec *apps.AppRolloutSpecRobot,
	values chartutil.Values,
) *apps.ChartAssignment {
	ca := newBaseChartAssignment(app, rollout, &app.Spec.Components.Robot)
	ca.Name = chartAssignmentName(rollout.Name, compTypeRobot, robot.Name)
	setLabel(&ca.ObjectMeta, labelRobotName, robot.Name)
	ca.Spec.ClusterName = robot.Name
	// A per-rollout chart version takes precedence over the App's version.
	if v := spec.Version; v != "" {
		ca.Spec.Chart.Version = v
	}
	// Merge order: base values < per-robot rollout values < injected identity.
	merged := chartutil.Values{}
	merged.MergeInto(values)
	merged.MergeInto(chartutil.Values(spec.Values))
	merged.MergeInto(chartutil.Values{"robot": robotValues{Name: robot.Name}})
	ca.Spec.Chart.Values = apps.ConfigValues(merged)
	return ca
}
// newBaseChartAssignment returns a new ChartAssignment initialized with all
// values that are fixed for the app, its rollout, and component.
func newBaseChartAssignment(app *apps.App, rollout *apps.AppRollout, comp *apps.AppComponent) *apps.ChartAssignment {
	ca := &apps.ChartAssignment{}
	// Copy labels and annotations entry by entry; assigning the map
	// references directly would share them across objects.
	for k, v := range rollout.Labels {
		setLabel(&ca.ObjectMeta, k, v)
	}
	for k, v := range rollout.Annotations {
		// kubectl's last-applied-config bookkeeping must not leak into the CA.
		if k == core.LastAppliedConfigAnnotation {
			continue
		}
		setAnnotation(&ca.ObjectMeta, k, v)
	}
	ca.Spec.NamespaceName = appNamespaceName(rollout.Name)
	if comp.Name != "" {
		ca.Spec.Chart = apps.AssignedChart{
			Repository: app.Spec.Repository,
			Version:    app.Spec.Version,
			Name:       comp.Name,
		}
	}
	ca.Spec.Chart.Inline = comp.Inline
	return ca
}
// matchingRobots returns the subset of robots that pass the given robot
// selector. It returns an error if the selector is invalid.
func matchingRobots(robots []registry.Robot, sel *apps.RobotSelector) ([]registry.Robot, error) {
	// `any: true` short-circuits to all robots.
	if sel.Any != nil && *sel.Any {
		return robots, nil
	}
	// Without a label selector nothing matches.
	if sel.LabelSelector == nil {
		return nil, nil
	}
	selector, err := metav1.LabelSelectorAsSelector(sel.LabelSelector)
	if err != nil {
		return nil, err
	}
	var matched []registry.Robot
	for _, robot := range robots {
		if selector.Matches(labels.Set(robot.Labels)) {
			matched = append(matched, robot)
		}
	}
	return matched, nil
}
// appNamespaceName returns the name of the namespace that charts of the
// given rollout are assigned to.
func appNamespaceName(rolloutName string) string {
	return fmt.Sprintf("app-%s", rolloutName)
}
// componentType distinguishes the two deployment targets of an app component.
type componentType string

const (
	compTypeRobot componentType = "robot"
	// Explicitly typed for consistency with compTypeRobot (it was
	// previously an untyped string constant).
	compTypeCloud componentType = "cloud"
)

// chartAssignmentName builds the canonical ChartAssignment name for a rollout
// component. Robot CAs carry the robot's name as a suffix; the single cloud
// CA has none.
func chartAssignmentName(rollout string, typ componentType, robot string) string {
	if robot != "" {
		return fmt.Sprintf("%s-%s-%s", rollout, typ, robot)
	}
	return fmt.Sprintf("%s-%s", rollout, typ)
}
// robotValues is the struct that is passed into the cloud chart configuration
// for each robot matched by a rollout.
type robotValues struct {
	// Name is the robot object's name; exposed to the chart as `.name`.
	Name string `json:"name"`
}
// setLabel sets label k to v on o, allocating the label map first if needed.
func setLabel(o *metav1.ObjectMeta, k, v string) {
	if o.Labels == nil {
		o.Labels = make(map[string]string)
	}
	o.Labels[k] = v
}
// setAnnotation sets annotation k to v on o, allocating the annotation map
// first if needed.
func setAnnotation(o *metav1.ObjectMeta, k, v string) {
	if o.Annotations == nil {
		o.Annotations = make(map[string]string)
	}
	o.Annotations[k] = v
}
// setOwnerReference adds or updates an owner reference. Existing references
// are detected based on the UID field.
func setOwnerReference(om *metav1.ObjectMeta, ref metav1.OwnerReference) {
	for i := range om.OwnerReferences {
		if om.OwnerReferences[i].UID == ref.UID {
			// Same owner: replace the stale reference in place.
			om.OwnerReferences[i] = ref
			return
		}
	}
	om.OwnerReferences = append(om.OwnerReferences, ref)
}
// indexOwnerReferences indexes resources by the UIDs of their owner references.
// NOTE(review): presumably registered as a controller-runtime field indexer
// for ChartAssignments — confirm against the manager setup code.
func indexOwnerReferences(o kclient.Object) (vs []string) {
	// The unchecked assertion relies on the indexer only ever being invoked
	// with ChartAssignment objects.
	ca := o.(*apps.ChartAssignment)
	for _, or := range ca.OwnerReferences {
		vs = append(vs, string(or.UID))
	}
	return vs
}

// indexAppName indexes AppRollouts by the name of the App they reference
// (.spec.appName), allowing rollouts to be listed by app.
func indexAppName(o kclient.Object) []string {
	ar := o.(*apps.AppRollout)
	return []string{ar.Spec.AppName}
}
const (
	// labelAppName holds the canonical name of an app.
	labelAppName = "cloudrobotics.com/app-name"
	// labelAppVersion holds the version of that app. Note: the default
	// (canonical) version of the app has a version label of "".
	labelAppVersion = "cloudrobotics.com/app-version"
)
// NewAppValidationWebhook returns a new webhook that validates Apps.
//
// This pertains to multiple versions of the same app, so that the labels
// defined above are in sync with the name of the App.
// The policy is:
//   - an unversioned app defines
//     cloudrobotics.com/app-name and
//     (optionally) cloudrobotics.com/app-version with a "" value;
//     the app-name must match the name of the object.
//   - a versioned app defines
//     cloudrobotics.com/app-name and
//     cloudrobotics.com/app-version;
//     the name of the App object must match LOWERCASE([app-name].v[app-version]).
func NewAppValidationWebhook(mgr manager.Manager) *admission.Webhook {
	return &admission.Webhook{Handler: newAppValidator(mgr.GetScheme())}
}
// appValidator implements an admission webhook that validates Apps.
type appValidator struct {
	decoder runtime.Decoder
}

// newAppValidator returns a validator backed by the scheme's universal
// deserializer.
func newAppValidator(sc *runtime.Scheme) *appValidator {
	return &appValidator{decoder: serializer.NewCodecFactory(sc).UniversalDeserializer()}
}

// Handle decodes the admission request's App and allows or denies it based
// on the naming policy checked by appValidate.
func (v *appValidator) Handle(_ context.Context, req admission.Request) admission.Response {
	app := &apps.App{}
	if err := runtime.DecodeInto(v.decoder, req.AdmissionRequest.Object.Raw, app); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	if err := appValidate(app); err != nil {
		return admission.Denied(err.Error())
	}
	return admission.Allowed("")
}
// appValidate checks that an App's object name is consistent with its
// app-name and app-version labels.
//
// Policy:
//   - no app-name label: legacy app, nothing to check.
//   - app-name label only (or app-version == ""): object name must equal
//     the app-name label.
//   - app-name plus non-empty app-version: object name must equal
//     LOWERCASE(<app-name>.v<app-version>).
func appValidate(cur *apps.App) error {
	name := cur.Name
	appName, anok := cur.Labels[labelAppName]
	if !anok {
		// Neither label is required to be present; this is a legacy app.
		return nil
	}
	appVersion, avok := cur.Labels[labelAppVersion]
	if avok && appVersion != "" {
		// Both name and version are defined.
		ename := strings.ToLower(fmt.Sprintf("%s.v%s", appName, appVersion))
		if ename != name {
			return fmt.Errorf("%q=%q, %q=%q: expected object name %q, got %q", labelAppName, appName, labelAppVersion, appVersion, ename, name)
		}
		return nil
	}
	// Only the name (or an empty version) is defined.
	if appName != name {
		return fmt.Errorf("%q=%q, undefined %q: expected object name %q, got %q", labelAppName, appName, labelAppVersion, appName, name)
	}
	return nil
}
// NewAppRolloutValidationWebhook returns a new webhook that validates AppRollouts.
func NewAppRolloutValidationWebhook(mgr manager.Manager) *admission.Webhook {
	return &admission.Webhook{Handler: newAppRolloutValidator(mgr.GetScheme())}
}

// appRolloutValidator implements a validation webhook.
type appRolloutValidator struct {
	// decoder deserializes raw admission payloads into AppRollout objects.
	decoder runtime.Decoder
}

// newAppRolloutValidator creates a validator backed by the scheme's
// universal deserializer.
func newAppRolloutValidator(sc *runtime.Scheme) *appRolloutValidator {
	return &appRolloutValidator{
		decoder: serializer.NewCodecFactory(sc).UniversalDeserializer(),
	}
}
// Handle decodes the admission request's AppRollout and allows or denies it
// based on the spec checks in appRolloutValidate.
func (v *appRolloutValidator) Handle(_ context.Context, req admission.Request) admission.Response {
	rollout := &apps.AppRollout{}
	if err := runtime.DecodeInto(v.decoder, req.AdmissionRequest.Object.Raw, rollout); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	if err := appRolloutValidate(rollout); err != nil {
		return admission.Denied(err.Error())
	}
	return admission.Allowed("")
}
// appRolloutValidate checks an AppRollout's spec: the app name must be a
// valid DNS subdomain, the reserved value keys must not be set, and every
// robot entry needs a usable selector.
func appRolloutValidate(cur *apps.AppRollout) error {
	appName := cur.Spec.AppName
	if appName == "" {
		return errors.New("app name missing")
	}
	if errs := validation.NameIsDNSSubdomain(appName, false); len(errs) > 0 {
		return errors.Errorf("validate app name: %s", strings.Join(errs, ", "))
	}
	// "robots" is injected into the cloud chart values by the controller.
	if _, ok := cur.Spec.Cloud.Values["robots"]; ok {
		return errors.Errorf(".spec.cloud.values.robots is a reserved field and must not be set")
	}
	for i, r := range cur.Spec.Robots {
		// "robot" is injected into each robot chart's values by the controller.
		if _, ok := r.Values["robot"]; ok {
			return errors.Errorf(".spec.robots[].values.robot is a reserved field and must not be set")
		}
		if r.Selector == nil {
			return errors.Errorf("no selector provided for robots %d", i)
		}
		// Reject if a selector has neither a matcher nor `any` set.
		// This mostly helps catching missing `matchLabels`.
		if r.Selector.Any == nil && r.Selector.LabelSelector == nil {
			return errors.Errorf("empty selector for robots %d (matchLabels not specified?)", i)
		}
	}
	return nil
}
================================================
FILE: src/go/pkg/controller/approllout/controller_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package approllout
import (
"strings"
"testing"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
registry "github.com/googlecloudrobotics/core/src/go/pkg/apis/registry/v1alpha1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/helm/pkg/chartutil"
"sigs.k8s.io/yaml"
)
// marshalYAML serializes v to a YAML string, failing the test on error.
func marshalYAML(t *testing.T, v interface{}) string {
	t.Helper()
	out, err := yaml.Marshal(v)
	if err != nil {
		t.Fatal(err)
	}
	return string(out)
}
// unmarshalYAML parses s (after trimming surrounding whitespace) into v,
// failing the test on error.
func unmarshalYAML(t *testing.T, v interface{}, s string) {
	t.Helper()
	err := yaml.Unmarshal([]byte(strings.TrimSpace(s)), v)
	if err != nil {
		t.Fatal(err)
	}
}
// verifyChartAssignment fails the test if want and got differ. The objects
// are compared via their YAML serialization for easier diff reading and to
// avoid complicated map[string]interface{} comparisons.
func verifyChartAssignment(t *testing.T, want, got *apps.ChartAssignment) {
	t.Helper()
	wantStr, gotStr := marshalYAML(t, want), marshalYAML(t, got)
	if wantStr != gotStr {
		t.Fatalf("expected ChartAssignment: \n%s\ngot:\n%s\n", wantStr, gotStr)
	}
}
// TestNewRobotChartAssignment verifies newRobotChartAssignment: labels and
// annotations are inherited from the rollout, the robot-name label is added,
// values are merged from base values, per-robot rollout values, and the
// injected "robot" identity, and the rollout's per-robot version (1.2.4)
// overrides the App's version (1.2.3).
func TestNewRobotChartAssignment(t *testing.T) {
	var app apps.App
	unmarshalYAML(t, &app, `
metadata:
  name: foo
spec:
  repository: https://example.org/helm
  version: 1.2.3
  components:
    robot:
      name: foo-robot
      inline: abcdefgh
`)
	var rollout apps.AppRollout
	unmarshalYAML(t, &rollout, `
metadata:
  name: foo-rollout
  labels:
    lkey1: lval1
  annotations:
    akey1: aval1
  namespace: default
spec:
  appName: prometheus
  robots:
  - selector:
      any: true
    values:
      foo1: bar1
    version: 1.2.4
`)
	var robot registry.Robot
	unmarshalYAML(t, &robot, `
metadata:
  name: robot1
`)
	baseValues := chartutil.Values{
		"foo2": "bar2",
	}
	var expected apps.ChartAssignment
	unmarshalYAML(t, &expected, `
metadata:
  name: foo-rollout-robot-robot1
  labels:
    lkey1: lval1
    cloudrobotics.com/robot-name: robot1
  annotations:
    akey1: aval1
spec:
  clusterName: robot1
  namespaceName: app-foo-rollout
  chart:
    repository: https://example.org/helm
    version: 1.2.4
    name: foo-robot
    inline: abcdefgh
    values:
      robot:
        name: robot1
      foo1: bar1
      foo2: bar2
`)
	result := newRobotChartAssignment(&robot, &app, &rollout, &rollout.Spec.Robots[0], baseValues)
	verifyChartAssignment(t, &expected, result)
}
// TestNewCloudChartAssignment verifies newCloudChartAssignment: the reserved
// "robots" value provided by the rollout is overwritten with the matched
// robot list, and values are merged from base and rollout cloud values.
func TestNewCloudChartAssignment(t *testing.T) {
	var app apps.App
	unmarshalYAML(t, &app, `
metadata:
  name: foo
spec:
  repository: https://example.org/helm
  version: 1.2.3
  components:
    cloud:
      name: foo-cloud
      inline: abcdefgh
`)
	var rollout apps.AppRollout
	unmarshalYAML(t, &rollout, `
metadata:
  name: foo-rollout
  labels:
    lkey1: lval1
  annotations:
    akey1: aval1
  namespace: default
spec:
  appName: prometheus
  cloud:
    values:
      robots: should_be_overwritten
      foo1: bar1
`)
	var robot1, robot2 registry.Robot
	unmarshalYAML(t, &robot1, `
metadata:
  name: robot1
`)
	unmarshalYAML(t, &robot2, `
metadata:
  name: robot2
`)
	baseValues := chartutil.Values{
		"foo2": "bar2",
	}
	var expected apps.ChartAssignment
	unmarshalYAML(t, &expected, `
metadata:
  name: foo-rollout-cloud
  labels:
    lkey1: lval1
  annotations:
    akey1: aval1
spec:
  clusterName: cloud
  namespaceName: app-foo-rollout
  chart:
    repository: https://example.org/helm
    version: 1.2.3
    name: foo-cloud
    inline: abcdefgh
    values:
      robots:
      - name: robot1
      - name: robot2
      foo1: bar1
      foo2: bar2
`)
	result := newCloudChartAssignment(&app, &rollout, baseValues, &robot1, &robot2)
	verifyChartAssignment(t, &expected, result)
}
// TestGenerateChartAssignments checks end-to-end generation for one app and
// three robots: two selectors pick robot1 and robot3 (robot2 matches
// neither), yielding one cloud CA listing both selected robots plus two
// robot CAs, sorted by name.
func TestGenerateChartAssignments(t *testing.T) {
	var app apps.App
	unmarshalYAML(t, &app, `
metadata:
  name: foo
spec:
  components:
    cloud:
      inline: inline-cloud
    robot:
      inline: inline-robot
`)
	var robots [3]registry.Robot
	unmarshalYAML(t, &robots[0], `
metadata:
  name: robot1
`)
	unmarshalYAML(t, &robots[1], `
metadata:
  name: robot2
  labels:
    a: b
`)
	unmarshalYAML(t, &robots[2], `
metadata:
  name: robot3
  labels:
    a: c
`)
	baseValues := chartutil.Values{
		"foo2": "bar2",
	}
	// Rollout with two selectors that select robot1 and robot3 respectively.
	// robot2 is not matched at all.
	var rollout apps.AppRollout
	unmarshalYAML(t, &rollout, `
metadata:
  name: foo-rollout
  namespace: default
spec:
  appName: foo
  cloud:
    values:
      robots: should_be_overwritten
      foo1: bar1
  robots:
  # robot1
  - selector:
      matchExpressions:
      - {key: a, operator: DoesNotExist}
  # robot3
  - selector:
      matchLabels:
        a: c
    values:
      foo3: bar3
`)
	var expected [3]apps.ChartAssignment
	unmarshalYAML(t, &expected[0], `
metadata:
  name: foo-rollout-cloud
spec:
  clusterName: cloud
  namespaceName: app-foo-rollout
  chart:
    inline: inline-cloud
    values:
      foo1: bar1
      foo2: bar2
      robots:
      - name: robot1
      - name: robot3
`)
	unmarshalYAML(t, &expected[1], `
metadata:
  name: foo-rollout-robot-robot1
  labels:
    cloudrobotics.com/robot-name: robot1
spec:
  clusterName: robot1
  namespaceName: app-foo-rollout
  chart:
    inline: inline-robot
    values:
      robot:
        name: robot1
      foo2: bar2
`)
	unmarshalYAML(t, &expected[2], `
metadata:
  name: foo-rollout-robot-robot3
  labels:
    cloudrobotics.com/robot-name: robot3
spec:
  clusterName: robot3
  namespaceName: app-foo-rollout
  chart:
    inline: inline-robot
    values:
      robot:
        name: robot3
      foo2: bar2
      foo3: bar3
`)
	al := []apps.App{app}
	cas, err := generateChartAssignments(al, robots[:], &rollout, baseValues)
	if err != nil {
		t.Fatalf("Generate failed: %s", err)
	}
	if len(cas) != len(expected) {
		t.Errorf("Expected %d ChartAssignments, got %d", len(expected), len(cas))
	}
	// Output is sorted by name, so it can be compared index by index.
	for i, ca := range cas {
		verifyChartAssignment(t, &expected[i], ca)
	}
}
// generateApp builds a test App named |name| carrying app-name/app-version
// labels and inline robot and cloud components with the given payloads.
func generateApp(name, version, robotPayload, cloudPayload string) apps.App {
	meta := metav1.ObjectMeta{
		Name: name,
		Labels: map[string]string{
			labelAppName:    name,
			labelAppVersion: version,
		},
	}
	components := apps.AppComponents{
		Cloud: apps.AppComponent{Name: "cloud", Inline: cloudPayload},
		Robot: apps.AppComponent{Name: "robot", Inline: robotPayload},
	}
	return apps.App{
		ObjectMeta: meta,
		Spec:       apps.AppSpec{Components: components},
	}
}
// generateRobot builds a test Robot named |name| with the given labels.
func generateRobot(name string, labels map[string]string) registry.Robot {
	var robot registry.Robot
	robot.ObjectMeta = metav1.ObjectMeta{
		Name:   name,
		Labels: labels,
	}
	return robot
}
// generateRollout builds a test AppRollout named |name| pointing at |appName|.
func generateRollout(name, appName string) apps.AppRollout {
	rollout := apps.AppRollout{
		Spec: apps.AppRolloutSpec{AppName: appName},
	}
	rollout.ObjectMeta = metav1.ObjectMeta{Name: name}
	return rollout
}
// addRobotToRollout appends a robot component that selects robots whose
// |matchLabel| equals |matchValue|, pinned to the given app version.
func addRobotToRollout(ar *apps.AppRollout, matchLabel, matchValue, version string) {
	selector := &apps.RobotSelector{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{matchLabel: matchValue},
		},
	}
	ar.Spec.Robots = append(ar.Spec.Robots, apps.AppRolloutSpecRobot{
		Version:  version,
		Selector: selector,
	})
}
// The tests below deal with vApp, a short form for "versioned App".

// TestGenerateChartAssignments_vApps exercises a rollout that mixes a
// versioned App (for the robots labeled with test-value) with the canonical
// App (for the robot labeled test-value-2).
func TestGenerateChartAssignments_vApps(t *testing.T) {
	const (
		appName    = "myapp"
		appVersion = "17"
		testLabel  = "test-label"
		testValueA = "test-value"
		testValueB = "test-value-2"
	)
	al := []apps.App{
		generateApp(appName, "", "testpayload", ""),
		generateApp(appName, appVersion, "testpayload", ""),
	}
	robots := []registry.Robot{
		generateRobot("robot1", map[string]string{testLabel: testValueA}),
		generateRobot("robot2", map[string]string{testLabel: testValueA}),
		generateRobot("robot3", map[string]string{testLabel: testValueB}),
	}
	rollout := generateRollout("test", appName)
	// The versioned app covers robots labeled testValueA.
	addRobotToRollout(&rollout, testLabel, testValueA, appVersion)
	// The canonical app covers robots labeled testValueB.
	addRobotToRollout(&rollout, testLabel, testValueB, "")
	// We only care that generation succeeds without errors.
	if _, err := generateChartAssignments(al, robots, &rollout, nil); err != nil {
		t.Fatalf("Generate failed: %v", err)
	}
}
// TestGenerateChartAssignments_vAppMissing checks that a rollout referencing
// an App version for which no App object exists is rejected with an error.
func TestGenerateChartAssignments_vAppMissing(t *testing.T) {
	appName := "myapp"
	appVersion := "17"
	newAppVersion := "18"
	testLabel := "test-label"
	testValueA := "test-value"
	al := []apps.App{
		// Only version 17 exists.
		generateApp(appName, appVersion, "testpayload", ""),
	}
	robots := []registry.Robot{
		generateRobot("robot1", map[string]string{
			testLabel: testValueA,
		}),
	}
	rollout := generateRollout("test", appName)
	// Request version 18, which does not exist.
	addRobotToRollout(&rollout, testLabel, testValueA, newAppVersion)
	_, err := generateChartAssignments(al, robots, &rollout, nil)
	if err == nil {
		// "non-existant" typo fixed in the failure message.
		t.Fatal("Requesting a non-existent app should fail, but didn't")
	}
}
// TestGenerateChartAssignments_vAppCloud checks that the cloud
// ChartAssignment is generated from the canonical (unversioned) App while
// the robot uses a versioned App, yielding one cloud and one robot CA.
func TestGenerateChartAssignments_vAppCloud(t *testing.T) {
	appName := "myapp"
	appVersion := "17"
	testLabel := "test-label"
	testValueA := "test-value"
	al := []apps.App{
		// Cloud does not have a Version field, and thus requires this
		// non versioned, canonical app.
		generateApp(appName, "", "testpayload", "cloudpayload"),
		generateApp(appName, appVersion, "testpayload", ""),
	}
	robots := []registry.Robot{
		generateRobot("robot1", map[string]string{
			testLabel: testValueA,
		}),
	}
	rollout := generateRollout("test", appName)
	// A versioned app matching robots labeled testValueA.
	addRobotToRollout(&rollout, testLabel, testValueA, appVersion)
	// Expand the rollout to cover the cloud.
	rollout.Spec.Cloud = apps.AppRolloutSpecCloud{}
	cas, err := generateChartAssignments(al, robots, &rollout, nil)
	if err != nil {
		// Include the underlying error to ease debugging.
		t.Fatalf("generate chart assignment with cloud rollout: %v", err)
	}
	if len(cas) != 2 {
		// "assingments" typo fixed in the failure message.
		t.Fatalf("chart assignments, expected 2, got %d", len(cas))
	}
}
// TestGenerateChartAssignments_vAppCloudMissing tests cloud rollout w/out App.
//
// As the Cloud field is not versioned, it needs the canonical version of the
// app to exist. Only versioned Apps are created here; generation must still
// succeed (simply without producing a cloud ChartAssignment), whether or not
// the rollout defines cloud values — the latter case only logs a warning.
func TestGenerateChartAssignments_vAppCloudMissing(t *testing.T) {
	appName := "myapp"
	appVersion := "17"
	al := []apps.App{
		// Only the versioned App exists.
		generateApp(appName, appVersion, "testpayload", "cloudpayload"),
	}
	rollout := generateRollout("test", appName)
	// Cloud rollout without values.
	rollout.Spec.Cloud = apps.AppRolloutSpecCloud{}
	if _, err := generateChartAssignments(al, nil, &rollout, nil); err != nil {
		t.Fatalf("cloud rollout without unversioned app should pass if rollout has no Cloud values: %v", err)
	}
	// Cloud rollout with values: still succeeds (the controller logs that
	// no cloud CA will be created).
	rollout.Spec.Cloud = apps.AppRolloutSpecCloud{Values: apps.ConfigValues{
		"test": 1,
	}}
	if _, err := generateChartAssignments(al, nil, &rollout, nil); err != nil {
		t.Fatalf("cloud rollout without unversioned app should pass if rollout has Cloud values: %v", err)
	}
}
// TestGenerateChartAssignments_cloudPerRobot checks that the cloud
// ChartAssignment's injected "robots" value lists only the robots matched by
// the rollout's selectors (robot1), not every registered robot.
func TestGenerateChartAssignments_cloudPerRobot(t *testing.T) {
	var app apps.App
	unmarshalYAML(t, &app, `
metadata:
  name: foo
spec:
  components:
    cloud:
      inline: inline-cloud
`)
	var robots [2]registry.Robot
	unmarshalYAML(t, &robots[0], `
metadata:
  name: robot1
`)
	unmarshalYAML(t, &robots[1], `
metadata:
  name: robot2
  labels:
    a: b
`)
	// Rollout selects robot1, but not robot2.
	var rollout apps.AppRollout
	unmarshalYAML(t, &rollout, `
metadata:
  name: foo-rollout
  namespace: default
spec:
  appName: foo
  cloud:
    values:
      robots: should_be_overwritten
  robots:
  # robot1
  - selector:
      matchExpressions:
      - {key: a, operator: DoesNotExist}
`)
	var expected apps.ChartAssignment
	unmarshalYAML(t, &expected, `
metadata:
  name: foo-rollout-cloud
spec:
  clusterName: cloud
  namespaceName: app-foo-rollout
  chart:
    inline: inline-cloud
    values:
      robots:
      - name: robot1
`)
	al := []apps.App{app}
	cas, err := generateChartAssignments(al, robots[:], &rollout, nil)
	if err != nil {
		t.Fatalf("Generate failed: %s", err)
	}
	// The app has no robot component, so only the cloud CA is produced.
	if len(cas) != 1 {
		t.Fatalf("Expected 1 ChartAssignments, got %d", len(cas))
	}
	verifyChartAssignment(t, &expected, cas[0])
}
// TestGenerateChartAssignments_selectorOverlap checks that a rollout whose
// selectors match the same robot more than once (any:true plus a label
// match on robot2) is rejected with errRobotSelectorOverlap.
func TestGenerateChartAssignments_selectorOverlap(t *testing.T) {
	var app apps.App
	unmarshalYAML(t, &app, `
metadata:
  name: foo
spec:
  components:
    robot:
      inline: inline-robot
`)
	var robots [2]registry.Robot
	unmarshalYAML(t, &robots[0], `
metadata:
  name: robot1
`)
	unmarshalYAML(t, &robots[1], `
metadata:
  name: robot2
  labels:
    a: b
`)
	// Rollout with two selectors that match the same robot.
	var rollout apps.AppRollout
	unmarshalYAML(t, &rollout, `
metadata:
  name: foo-rollout
spec:
  appName: foo
  robots:
  - selector:
      any: true
  - selector:
      matchLabels:
        a: b
`)
	al := []apps.App{app}
	_, err := generateChartAssignments(al, robots[:], &rollout, nil)
	// Direct comparison works because errRobotSelectorOverlap is a string
	// type and is returned by value.
	if exp := errRobotSelectorOverlap("robot2"); err != exp {
		t.Fatalf("expected error %q but got %q", exp, err)
	}
}
// TestSetStatus feeds one Failed, one Settled, and one Ready ChartAssignment
// into setStatus on a fresh rollout and checks the per-phase counters (Ready
// counts towards Settled as well) plus that both conditions are False, since
// 100 assignments are wanted but fewer are settled/ready.
func TestSetStatus(t *testing.T) {
	var ca1, ca2, ca3 apps.ChartAssignment
	unmarshalYAML(t, &ca1, `
metadata:
  name: ca1
status:
  phase: Failed
`)
	unmarshalYAML(t, &ca2, `
metadata:
  name: ca2
status:
  phase: Settled
`)
	unmarshalYAML(t, &ca3, `
metadata:
  name: ca3
status:
  phase: Ready
`)
	var ar apps.AppRollout
	setStatus(&ar, 100, []apps.ChartAssignment{ca1, ca2, ca3})
	if ar.Status.Assignments != 100 {
		t.Errorf("Expected .status.assignments to be %d but got %d", 100, ar.Status.Assignments)
	}
	if ar.Status.FailedAssignments != 1 {
		t.Errorf("Expected .status.failedAssignments to be %d but got %d", 1, ar.Status.FailedAssignments)
	}
	if ar.Status.SettledAssignments != 2 {
		t.Errorf("Expected .status.settledAssignments to be %d but got %d", 2, ar.Status.SettledAssignments)
	}
	if ar.Status.ReadyAssignments != 1 {
		t.Errorf("Expected .status.readyAssignments to be %d but got %d", 1, ar.Status.ReadyAssignments)
	}
	// Conditions are appended in the order setStatus sets them:
	// Settled first, then Ready.
	if c := ar.Status.Conditions[0]; c.Type != apps.AppRolloutConditionSettled ||
		c.Status != core.ConditionFalse {
		t.Errorf("Unexpected first condition %v, expected Settled=False", c)
	}
	if c := ar.Status.Conditions[1]; c.Type != apps.AppRolloutConditionReady ||
		c.Status != core.ConditionFalse {
		t.Errorf("Unexpected second condition %v, expected Ready=False", c)
	}
}
// TestValidateAppRollout runs appRolloutValidate against a table of valid
// and invalid rollout specs: missing/invalid app names, missing or empty
// robot selectors, and the reserved "robots"/"robot" value keys.
func TestValidateAppRollout(t *testing.T) {
	cases := []struct {
		name       string
		cur        string
		shouldFail bool
	}{
		{
			name: "valid-all",
			cur: `
spec:
  appName: myapp
  cloud:
    values:
      a: 2
      b: {c: 3}
  robots:
  - selector:
      any: true
    values:
      c: d
  - selector:
      matchLabels:
        abc: def
        foo: bar
  - selector:
      matchExpressions:
      - {key: foo, Op: DoesExist}
`,
		},
		{
			name: "valid-app-name-only",
			cur: `
spec:
  appName: my-app.123
`,
		},
		{
			name:       "missing-app-name",
			cur:        `spec: {}`,
			shouldFail: true,
		},
		{
			// '%' is not allowed in a DNS subdomain name.
			name: "invalid-app-name",
			cur: `
spec:
  appName: my%app
`,
			shouldFail: true,
		},
		{
			name: "missing-robot-selector",
			cur: `
spec:
  appName: myapp
  robots:
  - values:
      a: b
`,
			shouldFail: true,
		},
		{
			// Selector with an unknown field: neither `any` nor a label
			// selector is set after decoding.
			name: "wrong-selector",
			cur: `
spec:
  appName: myapp
  robots:
  - selector:
      a: b
`,
			shouldFail: true,
		},
		{
			// "robots" is reserved in cloud values.
			name: "cloud-values-Robots",
			cur: `
spec:
  appName: myapp
  cloud:
    values:
      robots:
        c: d
`,
			shouldFail: true,
		},
		{
			// "robot" is reserved in per-robot values.
			name: "robot-values-robot",
			cur: `
spec:
  appName: myapp
  robots:
  - selector:
      any: true
    values:
      robot:
        c: d
`,
			shouldFail: true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			var cur apps.AppRollout
			unmarshalYAML(t, &cur, c.cur)
			err := appRolloutValidate(&cur)
			if err == nil && c.shouldFail {
				t.Fatal("expected failure but got none")
			}
			if err != nil && !c.shouldFail {
				t.Fatalf("unexpected error: %s", err)
			}
		})
	}
}
// TestValidateApp exercises appValidate's naming policy: apps without
// labels (legacy) pass, an app-name label must equal the object name, and
// versioned apps must be named LOWERCASE(<app-name>.v<app-version>).
func TestValidateApp(t *testing.T) {
	cases := []struct {
		name       string
		cur        string
		shouldFail bool
	}{
		{
			// Legacy app without any naming labels.
			name: "valid",
			cur: `
metadata:
  name: app
`,
		},
		{
			name: "valid-app-label-only",
			cur: `
metadata:
  name: app
  labels:
    cloudrobotics.com/app-name: app
`,
		},
		{
			name: "valid-both-labels",
			cur: `
metadata:
  name: app.v17
  labels:
    cloudrobotics.com/app-name: app
    cloudrobotics.com/app-version: 17
`,
		},
		{
			// The expected object name is lower-cased.
			name: "valid-both-labels-lower",
			cur: `
metadata:
  name: app.v17rc00
  labels:
    cloudrobotics.com/app-name: app
    cloudrobotics.com/app-version: 17RC00
`,
		},
		{
			name:       "invalid-app-label-only",
			shouldFail: true,
			cur: `
metadata:
  name: app2
  labels:
    cloudrobotics.com/app-name: app
`,
		},
		{
			name:       "invalid-both-labels",
			shouldFail: true,
			cur: `
metadata:
  name: app.v17rc10
  labels:
    cloudrobotics.com/app-name: app
    cloudrobotics.com/app-version: 17RC00
`,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			var cur apps.App
			unmarshalYAML(t, &cur, c.cur)
			err := appValidate(&cur)
			if err == nil && c.shouldFail {
				t.Fatal("expected failure but got none")
			}
			if err != nil && !c.shouldFail {
				t.Fatalf("unexpected error: %s", err)
			}
		})
	}
}
================================================
FILE: src/go/pkg/controller/chartassignment/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", "gomock")

# Library implementing the ChartAssignment controller: reconciliation
# (controller.go), Helm/Synk release handling (release.go), and the
# admission webhook validator (validator.go).
go_library(
    name = "go_default_library",
    srcs = [
        "controller.go",
        "release.go",
        "validator.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/controller/chartassignment",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/gcr:go_default_library",
        "//src/go/pkg/synk:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/api/validation:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/serializer:go_default_library",
        "@io_k8s_cli_runtime//pkg/resource:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//tools/record:go_default_library",
        "@io_k8s_client_go//util/workqueue:go_default_library",
        "@io_k8s_helm//pkg/chartutil:go_default_library",
        "@io_k8s_helm//pkg/downloader:go_default_library",
        "@io_k8s_helm//pkg/getter:go_default_library",
        "@io_k8s_helm//pkg/helm/helmpath:go_default_library",
        "@io_k8s_helm//pkg/proto/hapi/chart:go_default_library",
        "@io_k8s_helm//pkg/renderutil:go_default_library",
        "@io_k8s_helm//pkg/repo:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/client:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/controller:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/event:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/handler:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/manager:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/reconcile:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/source:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/webhook/admission:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

# Generates a gomock for the synk.Interface so tests can stub out the
# Synk client (output is compiled into go_default_test below).
gomock(
    name = "synk_interface",
    out = "synk_interface_test.go",
    interfaces = ["Interface"],
    library = "//src/go/pkg/synk:go_default_library",
    package = "chartassignment",
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "release_test.go",
        "synk_interface_test.go",
        "validator_test.go",
    ],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/kubetest:go_default_library",
        "@com_github_golang_mock//gomock:go_default_library",
        "@io_k8s_client_go//tools/record:go_default_library",
        "@io_k8s_helm//pkg/chartutil:go_default_library",
        "@io_k8s_sigs_yaml//:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/controller/chartassignment/controller.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chartassignment
import (
"context"
"fmt"
"log/slog"
"time"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/gcr"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
kclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
// Allow the Service Account Controller some time to create the default
// SA in a new namespace.
defaultServiceAccountDeadline = time.Minute
fieldIndexNamespace = "spec.namespaceName"
statusCheckingOptOutLabel = "cloudrobotics.com/opt-out-error-checking"
)
// Add adds a controller and validation webhook for the ChartAssignment resource type
// to the manager and server.
// Handled ChartAssignments are filtered by the provided cluster.
func Add(ctx context.Context, mgr manager.Manager, cloud bool) error {
	r := &Reconciler{
		kube:     mgr.GetClient(),
		recorder: mgr.GetEventRecorderFor("chartassignment-controller"),
		cloud:    cloud,
	}
	var err error
	r.releases, err = newReleases(mgr.GetConfig(), r.recorder)
	if err != nil {
		return err
	}
	c, err := controller.New("chartassignment", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}
	// Index ChartAssignments by their target namespace so enqueueForPod can
	// look them up from the namespace of a changed Pod.
	err = mgr.GetCache().IndexField(ctx, &apps.ChartAssignment{}, fieldIndexNamespace,
		func(o kclient.Object) []string {
			return []string{o.(*apps.ChartAssignment).Spec.NamespaceName}
		},
	)
	if err != nil {
		return errors.Wrap(err, "add field indexer")
	}
	err = c.Watch(
		source.Kind(mgr.GetCache(), &apps.ChartAssignment{}),
		&handler.EnqueueRequestForObject{},
	)
	if err != nil {
		// Wrapped for consistency with the other error paths.
		return errors.Wrap(err, "watch ChartAssignments")
	}
	// Watch Pods so the Ready condition of owning ChartAssignments is kept
	// up to date as pods come and go.
	err = c.Watch(
		source.Kind(mgr.GetCache(), &core.Pod{}),
		&handler.Funcs{
			CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
				r.enqueueForPod(ctx, e.Object, q)
			},
			UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
				r.enqueueForPod(ctx, e.ObjectNew, q)
			},
			DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
				r.enqueueForPod(ctx, e.Object, q)
			},
		},
	)
	if err != nil {
		// Fixed: this watch is on Pods, not Apps; the old message was misleading.
		return errors.Wrap(err, "watch Pods")
	}
	return nil
}
// enqueueForPod queues a reconciliation for every ChartAssignment whose
// spec.namespaceName matches the namespace of the given Pod.
func (r *Reconciler) enqueueForPod(ctx context.Context, m meta.Object, q workqueue.RateLimitingInterface) {
	ns := m.GetNamespace()
	var assignments apps.ChartAssignmentList
	if err := r.kube.List(ctx, &assignments, kclient.MatchingFields(map[string]string{fieldIndexNamespace: ns})); err != nil {
		slog.Error("List ChartAssignments failed", slog.String("Namespace", ns), ilog.Err(err))
		return
	}
	for i := range assignments.Items {
		q.Add(reconcile.Request{
			NamespacedName: kclient.ObjectKey{Name: assignments.Items[i].Name},
		})
	}
}
// Reconciler provides an idempotent function that brings the cluster into a
// state consistent with the specification of a ChartAssignment.
type Reconciler struct {
	kube     kclient.Client       // API client used for all reads and writes.
	recorder record.EventRecorder // emits events on ChartAssignments.
	releases *releases            // cache of Synk releases managed by this controller.
	cloud    bool                 // true when running in the cloud cluster (see Reconcile).
}
// Reconcile creates and updates a Synk ResourceSet for the given chart
// assignment. It rolls back releases to the previous revision if an upgrade
// failed. It continuously requeues the ChartAssignment for reconciliation to
// monitor the status of the ResourceSet.
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	var ca apps.ChartAssignment
	switch err := r.kube.Get(ctx, req.NamespacedName, &ca); {
	case k8serrors.IsNotFound(err):
		// Assignment was already deleted. We did all required cleanup
		// when removing the finalizer. Thus, there's nothing to do.
		slog.Info("ChartAssignment no longer exists, skipping reconciliation...", slog.Any("Name", req.NamespacedName))
		return reconcile.Result{}, nil
	case err != nil:
		return reconcile.Result{}, fmt.Errorf("getting ChartAssignment %q failed: %s", req, err)
	}
	// Reconcile no ChartAssignments for robots on the cloud.
	// We do have ChartAssignments without the robot label on the robot but they
	// do not pass through the cloud cluster.
	// Labels is of type map[string]string but we care only for the existence
	// of the key.
	if _, isRobot := ca.Labels["cloudrobotics.com/robot-name"]; isRobot && r.cloud {
		return reconcile.Result{}, nil
	}
	return r.reconcile(ctx, &ca)
}
const (
	// The finalizer that's applied to assignments to block their garbage collection
	// until the Synk ResourceSet is deleted.
	finalizer = "helm.apps.cloudrobotics.com"
	// Requeue interval when the underlying Synk ResourceSet is not in a stable state yet.
	requeueFast = 3 * time.Second
	// Requeue interval after the underlying Synk ResourceSet reached a stable state.
	requeueSlow = 3 * time.Minute
)
// namespaceDeletionError indicates that a namespace could not be created
// because a previously-created namespace with the same name is pending
// deletion. This occurs when you delete and recreate a chartassignment. It is
// transient, but may last seconds or minutes if the namespace contains
// resources that are slow to delete.
type namespaceDeletionError struct {
	msg string
}

// Error implements the error interface.
func (e *namespaceDeletionError) Error() string { return e.msg }
// missingServiceAccountError indicates that the default ServiceAccount has not
// yet been created, and that the chart should not be updated to avoid creating
// pods before the ImagePullSecrets have been applied.
type missingServiceAccountError struct {
	msg string
}

// Error implements the error interface.
func (e *missingServiceAccountError) Error() string { return e.msg }
// ensureNamespace creates or updates the Namespace named by spec.namespaceName
// and records the ChartAssignment as one of its owners. It returns a
// namespaceDeletionError while a previous namespace of the same name is still
// pending deletion.
// NOTE(review): ns.Labels is overwritten with {"app": <assignment name>}, so
// labels added by other parties to an existing namespace would be dropped on
// the next Update — confirm this is intended.
func (r *Reconciler) ensureNamespace(ctx context.Context, as *apps.ChartAssignment) (*core.Namespace, error) {
	// Create application namespace if it doesn't exist.
	var ns core.Namespace
	err := r.kube.Get(ctx, kclient.ObjectKey{Name: as.Spec.NamespaceName}, &ns)
	if err != nil && !k8serrors.IsNotFound(err) {
		return nil, fmt.Errorf("getting Namespace %q failed: %s", as.Spec.NamespaceName, err)
	}
	if ns.DeletionTimestamp != nil {
		return nil, &namespaceDeletionError{
			msg: fmt.Sprintf("namespace %q was marked for deletion at %s, skipping", as.Spec.NamespaceName, ns.DeletionTimestamp),
		}
	}
	createNamespace := k8serrors.IsNotFound(err)
	ns.Name = as.Spec.NamespaceName
	ns.Labels = map[string]string{"app": as.Name}
	// Add ourselves to the owners if we aren't already.
	_true := true
	added := setOwnerReference(&ns.ObjectMeta, meta.OwnerReference{
		APIVersion:         as.APIVersion,
		Kind:               as.Kind,
		Name:               as.Name,
		UID:                as.UID,
		BlockOwnerDeletion: &_true,
	})
	if !added {
		// Owner reference already recorded; skip the API write.
		return &ns, nil
	}
	if createNamespace {
		return &ns, r.kube.Create(ctx, &ns)
	}
	return &ns, r.kube.Update(ctx, &ns)
}
// ensureSecrets copies secrets from the default namespace, since service
// accounts and pods cannot reference secrets in other namespaces.
// Only secrets labeled cloudrobotics.com/copy-to-chart-namespaces=true are
// copied; secrets that already exist in the target namespace are left as-is.
func (r *Reconciler) ensureSecrets(ctx context.Context, as *apps.ChartAssignment) error {
	var secrets core.SecretList
	err := r.kube.List(ctx, &secrets, kclient.MatchingLabels(map[string]string{
		"cloudrobotics.com/copy-to-chart-namespaces": "true",
	}))
	if err != nil {
		// Fixed: the List error was previously ignored, which silently
		// skipped copying any secrets when the API call failed.
		return fmt.Errorf("list secrets: %w", err)
	}
	for _, secret := range secrets.Items {
		// Drop the resourceVersion/uid/etc from the copied resource, to avoid
		// confusing the apiserver, and drop annotations/labels that aren't
		// needed. We just need the name and the new namespace.
		secret.ObjectMeta = meta.ObjectMeta{
			Namespace: as.Spec.NamespaceName,
			Name:      secret.Name,
		}
		err = r.kube.Create(ctx, &secret)
		if k8serrors.IsAlreadyExists(err) {
			// Fixed: continue with the remaining secrets instead of returning
			// after the first one that already exists.
			continue
		} else if err != nil {
			return fmt.Errorf("create Secret %s/%s: %w", as.Spec.NamespaceName, secret.Name, err)
		}
	}
	return nil
}
// ensureServiceAccount makes sure we have an image pull secret for gcr.io inside the apps namespace
// and the default service account configured to use it. This is needed to make apps work that
// reference images from a private container registry.
// TODO(ensonic): Put this behind a flag to only do this as needed.
func (r *Reconciler) ensureServiceAccount(ctx context.Context, ns *core.Namespace, as *apps.ChartAssignment) error {
	if r.cloud {
		// We don't need any of this for cloud charts.
		return nil
	}
	// Check for the image pull secret that the SA will refer to.
	var secret core.Secret
	err := r.kube.Get(ctx, kclient.ObjectKey{Namespace: as.Spec.NamespaceName, Name: gcr.SecretName}, &secret)
	if k8serrors.IsNotFound(err) {
		// No pull secret in this namespace: nothing to configure.
		return nil
	} else if err != nil {
		// Fixed: previously any non-NotFound error was silently ignored and
		// the code proceeded as if the secret existed.
		return fmt.Errorf("getting Secret %q failed: %s", gcr.SecretName, err)
	}
	// Configure the default service account in the namespace.
	var sa core.ServiceAccount
	err = r.kube.Get(ctx, kclient.ObjectKey{Namespace: as.Spec.NamespaceName, Name: "default"}, &sa)
	if err != nil {
		if k8serrors.IsNotFound(err) && time.Since(ns.CreationTimestamp.Time) < defaultServiceAccountDeadline {
			// The Service Account Controller hasn't created the default SA yet.
			return &missingServiceAccountError{
				msg: fmt.Sprintf("ServiceAccount \"%s:default\" not yet created", ns.Name),
			}
		}
		return fmt.Errorf("getting ServiceAccount \"%s:default\" failed: %s", as.Spec.NamespaceName, err)
	}
	// Only add the secret once.
	ips := core.LocalObjectReference{Name: gcr.SecretName}
	found := false
	for _, s := range sa.ImagePullSecrets {
		if s == ips {
			found = true
			break
		}
	}
	if !found {
		sa.ImagePullSecrets = append(sa.ImagePullSecrets, ips)
	}
	return r.kube.Update(ctx, &sa)
}
// reconcile performs the actual reconciliation: it handles deletion, prepares
// the target namespace, copied secrets, and service account, adds the cleanup
// finalizer, triggers the asynchronous chart update, and mirrors the release
// status onto the ChartAssignment. It requeues quickly while deployment is in
// progress and slowly once the assignment is Ready or Failed.
func (r *Reconciler) reconcile(ctx context.Context, as *apps.ChartAssignment) (reconcile.Result, error) {
	// If we are scheduled for deletion, delete the Synk ResourceSet and drop our
	// finalizer so garbage collection can continue.
	if as.DeletionTimestamp != nil {
		slog.Info("Ensure ChartAssignment cleanup", slog.String("Name", as.Name))
		if err := r.ensureDeleted(ctx, as); err != nil {
			return reconcile.Result{}, fmt.Errorf("ensure deleted: %s", err)
		}
		if err := r.setStatus(ctx, as); err != nil {
			return reconcile.Result{}, fmt.Errorf("set status: %s", err)
		}
		// Requeue to track deletion progress.
		return reconcile.Result{Requeue: true, RequeueAfter: requeueFast}, nil
	}
	ns, err := r.ensureNamespace(ctx, as)
	if err != nil {
		if _, ok := err.(*namespaceDeletionError); ok {
			// Transient: the old namespace is still being torn down.
			slog.Error("Ensure namespace", ilog.Err(err))
			// Requeue to track deletion progress.
			return reconcile.Result{Requeue: true, RequeueAfter: requeueFast}, nil
		}
		return reconcile.Result{}, fmt.Errorf("ensure namespace: %s", err)
	}
	if err := r.ensureSecrets(ctx, as); err != nil {
		return reconcile.Result{}, fmt.Errorf("ensure secrets: %s", err)
	}
	if err := r.ensureServiceAccount(ctx, ns, as); err != nil {
		if _, ok := err.(*missingServiceAccountError); ok {
			// Transient: the default SA has not been created yet.
			slog.Warn("Reconcile failed. This is expected to occur rarely.", ilog.Err(err))
			return reconcile.Result{Requeue: true, RequeueAfter: requeueFast}, nil
		} else {
			return reconcile.Result{}, fmt.Errorf("ensure service-account: %s", err)
		}
	}
	// Ensure a finalizer on the ChartAssignment so we don't get deleted before
	// we've properly deleted the associated Synk ResourceSet.
	if !stringsContain(as.Finalizers, finalizer) {
		as.Finalizers = append(as.Finalizers, finalizer)
		if err := r.kube.Update(ctx, as); err != nil {
			return reconcile.Result{}, errors.Wrap(err, "add finalizer")
		}
	}
	// Kick off the (asynchronous) chart deployment if needed.
	r.releases.ensureUpdated(as)
	if err := r.setStatus(ctx, as); err != nil {
		if k8serrors.IsConflict(err) {
			// The cache has an old status. This can be ignored, as
			// controller-runtime will reconcile again when the cache updates:
			// https://github.com/kubernetes-sigs/controller-runtime/issues/377
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, errors.Wrap(err, "update status")
	}
	// Quickly requeue for status updates when deployment is in progress.
	switch as.Status.Phase {
	case apps.ChartAssignmentPhaseReady, apps.ChartAssignmentPhaseFailed:
		return reconcile.Result{Requeue: true, RequeueAfter: requeueSlow}, nil
	}
	return reconcile.Result{Requeue: true, RequeueAfter: requeueFast}, nil
}
// condition converts a bool into the corresponding core.ConditionStatus.
func condition(b bool) core.ConditionStatus {
	if !b {
		return core.ConditionFalse
	}
	return core.ConditionTrue
}
// setStatus copies the cached release status onto the ChartAssignment,
// derives the Ready condition from the pods in the target namespace, and
// writes the status subresource.
func (r *Reconciler) setStatus(ctx context.Context, as *apps.ChartAssignment) error {
	status, ok := r.releases.status(as.Name)
	if !ok {
		// No cached release yet; nothing to report.
		return nil
	} else if status.phase == apps.ChartAssignmentPhaseDeleted {
		// The assignment may have been garbage collected already, so
		// don't try to update the status.
		return nil
	}
	as.Status.ObservedGeneration = as.Generation
	as.Status.Phase = status.phase
	if c := condition(status.phase == apps.ChartAssignmentPhaseSettled); status.err == nil {
		setCondition(as, apps.ChartAssignmentConditionSettled, c, "")
	} else {
		setCondition(as, apps.ChartAssignmentConditionSettled, c, status.err.Error())
	}
	var ns core.Namespace
	if err := r.kube.Get(ctx, kclient.ObjectKey{Name: as.Spec.NamespaceName}, &ns); err != nil {
		if k8serrors.IsNotFound(err) {
			setCondition(as, apps.ChartAssignmentConditionReady, condition(false),
				"waiting for namespace creation")
		} else {
			return errors.Wrap(err, "get namespace")
		}
	} else {
		// Determine readiness based on pods in the app namespace being ready.
		// This is an incomplete heuristic but it should catch the vast majority of errors.
		var pods core.PodList
		// Note: this lists 0 pods if the namespace has not been created yet!
		if err := r.kube.List(ctx, &pods, kclient.InNamespace(as.Spec.NamespaceName)); err != nil {
			return errors.Wrap(err, "list pods")
		}
		// Omit pods that have opted out of status checking.
		var filteredPods []core.Pod
		for _, pod := range pods.Items {
			if val, exists := pod.Labels[statusCheckingOptOutLabel]; !exists || val != "true" {
				filteredPods = append(filteredPods, pod)
			}
		}
		ready, total := 0, len(filteredPods)
		for _, p := range filteredPods {
			switch p.Status.Phase {
			case core.PodRunning, core.PodSucceeded:
				ready++
			}
		}
		// Readiness is only given if the release is settled to begin with.
		if status.phase != apps.ChartAssignmentPhaseSettled {
			setCondition(as, apps.ChartAssignmentConditionReady, core.ConditionFalse,
				"Release not settled yet")
		} else {
			if ready == total {
				as.Status.Phase = apps.ChartAssignmentPhaseReady
			}
			setCondition(as, apps.ChartAssignmentConditionReady, condition(ready == total),
				fmt.Sprintf("%d/%d pods are running or succeeded", ready, total))
		}
	}
	return r.kube.Status().Update(ctx, as)
}
// ensureDeleted ensures that the Synk ResourceSet is deleted and the finalizer gets removed.
func (r *Reconciler) ensureDeleted(ctx context.Context, as *apps.ChartAssignment) error {
	r.releases.ensureDeleted(as)
	st, ok := r.releases.status(as.Name)
	switch {
	case !ok:
		return fmt.Errorf("release status not found")
	case st.phase != apps.ChartAssignmentPhaseDeleted:
		// Deletion still in progress, check again later.
		return nil
	case !stringsContain(as.Finalizers, finalizer):
		// Finalizer already removed; nothing left to do.
		return nil
	}
	as.Finalizers = stringsDelete(as.Finalizers, finalizer)
	if err := r.kube.Update(ctx, as); err != nil {
		return fmt.Errorf("update failed: %s", err)
	}
	return nil
}
// stringsContain reports whether s is an element of list.
func stringsContain(list []string, s string) bool {
	for i := 0; i < len(list); i++ {
		if list[i] == s {
			return true
		}
	}
	return false
}
// stringsDelete returns a copy of list with every occurrence of s removed.
// The result is nil when nothing remains.
func stringsDelete(list []string, s string) (res []string) {
	for i := range list {
		if v := list[i]; v != s {
			res = append(res, v)
		}
	}
	return res
}
// setOwnerReference ensures the owner reference is set and returns true if it did
// not exist before. Existing references are detected based on the UID field.
func setOwnerReference(om *meta.ObjectMeta, ref meta.OwnerReference) bool {
	refs := om.OwnerReferences
	for i := range refs {
		if refs[i].UID == ref.UID {
			// Same owner already recorded; refresh it in place.
			refs[i] = ref
			return false
		}
	}
	om.OwnerReferences = append(refs, ref)
	return true
}
// inCondition returns true if the ChartAssignment has a condition of the given
// type in state true.
func inCondition(as *apps.ChartAssignment, c apps.ChartAssignmentConditionType) bool {
	for i := range as.Status.Conditions {
		cond := &as.Status.Conditions[i]
		if cond.Type == c && cond.Status == core.ConditionTrue {
			return true
		}
	}
	return false
}
// setCondition adds or updates a condition. Existing conditions are detected
// based on the Type field. LastUpdateTime changes whenever status or message
// change; LastTransitionTime only when the status changes.
func setCondition(as *apps.ChartAssignment, t apps.ChartAssignmentConditionType, v core.ConditionStatus, msg string) {
	now := meta.Now()
	conds := as.Status.Conditions
	for i := range conds {
		if conds[i].Type != t {
			continue
		}
		// Update existing condition in place.
		statusChanged := conds[i].Status != v
		if statusChanged || conds[i].Message != msg {
			conds[i].LastUpdateTime = now
		}
		if statusChanged {
			conds[i].LastTransitionTime = now
		}
		conds[i].Message = msg
		conds[i].Status = v
		return
	}
	// Condition set for the first time.
	as.Status.Conditions = append(conds, apps.ChartAssignmentCondition{
		Type:               t,
		LastUpdateTime:     now,
		LastTransitionTime: now,
		Status:             v,
		Message:            msg,
	})
}
================================================
FILE: src/go/pkg/controller/chartassignment/release.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chartassignment
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
"sync"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/synk"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
"go.opencensus.io/trace"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/downloader"
"k8s.io/helm/pkg/getter"
"k8s.io/helm/pkg/helm/helmpath"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/renderutil"
"k8s.io/helm/pkg/repo"
)
// releases is a cache of releases currently handled.
type releases struct {
	recorder record.EventRecorder // emits events on ChartAssignments.
	synk     synk.Interface       // shared Synk client handed to each release.

	mtx sync.Mutex // guards m.
	m   map[string]*release
}
// newReleases builds an empty release cache backed by a Synk client for cfg.
func newReleases(cfg *rest.Config, rec record.EventRecorder) (*releases, error) {
	client, err := synk.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	rs := &releases{
		recorder: rec,
		synk:     client,
		m:        make(map[string]*release),
	}
	return rs, nil
}
// release is a cache object which acts as a proxy for Synk ResourceSets.
type release struct {
	name     string
	synk     synk.Interface       // applies/deletes the ResourceSet.
	recorder record.EventRecorder // emits events on the ChartAssignment.
	actorc   chan func()          // serializes update/delete work; drained by run().

	gen int64 // last deployed generation.

	mtx    sync.Mutex // guards status (and gen; see generation/setGeneration).
	status releaseStatus
}

// releaseStatus is the mutable deployment state of a release.
type releaseStatus struct {
	phase apps.ChartAssignmentPhase
	err   error // last encountered error
	retry bool  // whether deployment should be retried.
}
// status returns the current phase and error of the release. ok is false
// if the release does not exist in the cache.
func (rs *releases) status(name string) (releaseStatus, bool) {
	rs.mtx.Lock()
	rel, exists := rs.m[name]
	rs.mtx.Unlock()
	if !exists {
		return releaseStatus{}, false
	}
	rel.mtx.Lock()
	s := rel.status
	rel.mtx.Unlock()
	return s, true
}
// add returns the cached release for name, creating it (with phase Accepted)
// and starting its worker goroutine on first use.
func (rs *releases) add(name string) *release {
	rs.mtx.Lock()
	defer rs.mtx.Unlock()
	if existing, ok := rs.m[name]; ok {
		return existing
	}
	rel := &release{
		name:     name,
		synk:     rs.synk,
		recorder: rs.recorder,
		actorc:   make(chan func()),
	}
	rel.status.phase = apps.ChartAssignmentPhaseAccepted
	rs.m[name] = rel
	// Start applying updates in the background.
	go rel.run()
	return rel
}
// ensureUpdated ensures that the ChartAssignment is installed as a Synk
// ResourceSet.
// It returns true if it could initiate an update successfully.
func (rs *releases) ensureUpdated(as *apps.ChartAssignment) bool {
	r := rs.add(as.Name)
	status, _ := rs.status(as.Name)
	// If the last generation we deployed matches the provided one, there's
	// nothing to do. Unless the previous update set the retry flag due to
	// a transient error.
	// For a fresh release object, a first update will always happen as
	// r.generation is 0 and resource generations start at 1.
	if r.generation() == as.Generation && !status.retry {
		return true
	}
	// update() starts by loading the chart. Set this before returning so the
	// caller sees the right phase.
	r.setPhase(apps.ChartAssignmentPhaseLoadingChart)
	// Deep-copy so the background worker cannot race with callers mutating as.
	asCopy := as.DeepCopy()
	started := r.start(func() { r.update(asCopy) })
	if started {
		// Only record the generation if the worker accepted the job;
		// otherwise a later reconcile must retry this generation.
		r.setGeneration(as.Generation)
	}
	return started
}
// ensureDeleted ensures that deletion of the release is run.
// It returns true if it could initiate deletion successfully.
func (rs *releases) ensureDeleted(as *apps.ChartAssignment) bool {
	snapshot := as.DeepCopy()
	rel := rs.add(as.Name)
	return rel.start(func() { rel.delete(snapshot) })
}
// run executes all functions sent on the actor channel in sequence. It is
// started once per release (see releases.add) and serializes update/delete
// work for that release.
func (r *release) run() {
	for f := range r.actorc {
		f()
	}
}
// start tries to launch f on the worker goroutine.
// If there's already a function running, it immediately returns false.
func (r *release) start(f func()) bool {
	select {
	case r.actorc <- f:
		return true
	default:
		// Worker busy: actorc is unbuffered, so the send would block.
		return false
	}
}
// setPhase moves the release to phase p, clearing any recorded error and
// retry flag.
func (r *release) setPhase(p apps.ChartAssignmentPhase) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.status = releaseStatus{phase: p}
}
// generation returns the last deployed generation.
func (r *release) generation() int64 {
	r.mtx.Lock()
	g := r.gen
	r.mtx.Unlock()
	return g
}
// setGeneration records the last deployed generation.
func (r *release) setGeneration(generation int64) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.gen = generation
}
// setFailed records err as the release's last error. For non-retriable errors
// the phase is set to Failed; for retriable (transient) errors the current
// phase is kept and retry is flagged so ensureUpdated attempts the same
// generation again.
func (r *release) setFailed(err error, retry bool) {
	r.mtx.Lock()
	if !retry {
		slog.Warn("chart failed", slog.Any("phase", r.status.phase), ilog.Err(err))
		// We only update the phase for non-retriable errors. This mitigates a
		// race condition between ensureUpdated, which sets phase=Updating when
		// retrying, and setStatus, which reads either the old phase or Updating
		// and copies it to the chartassignment status.
		r.status.phase = apps.ChartAssignmentPhaseFailed
	} else {
		slog.Warn("chart failed (retrying)", slog.Any("phase", r.status.phase), ilog.Err(err))
	}
	r.status.err = err
	r.status.retry = retry
	r.mtx.Unlock()
}
// delete removes the release's Synk ResourceSet and, on success, marks the
// release Deleted and resets the deployed generation. On failure the error is
// recorded via setFailed and the phase is left non-Deleted so the controller
// retries on the next reconcile.
func (r *release) delete(as *apps.ChartAssignment) {
	r.mtx.Lock()
	currentPhase := r.status.phase
	r.mtx.Unlock()
	if currentPhase == apps.ChartAssignmentPhaseDeleted {
		return
	}
	r.setPhase(apps.ChartAssignmentPhaseDeleting)
	r.recorder.Event(as, core.EventTypeNormal, "DeleteChart", "deleting chart")
	if err := r.synk.Delete(context.Background(), as.Name); err != nil {
		r.recorder.Event(as, core.EventTypeWarning, "Failure", err.Error())
		r.setFailed(errors.Wrap(err, "delete release"), synk.IsTransientErr(err))
		// Fixed: previously execution fell through here, recording a Success
		// event and setting phase Deleted even though deletion failed (which
		// let the finalizer be removed without cleanup).
		return
	}
	r.recorder.Event(as, core.EventTypeNormal, "Success", "chart deleted successfully")
	r.setPhase(apps.ChartAssignmentPhaseDeleted)
	// Reset last deployed generation to 0 as the ChartAssignment will be deleted
	// and its generation start at 1 again if it is re-created.
	r.setGeneration(0)
}
// update renders the ChartAssignment's chart and applies the resulting
// resources through Synk, recording events and updating the release status.
// It runs on the release's worker goroutine (see start/run).
func (r *release) update(as *apps.ChartAssignment) {
	resources, retry, err := loadAndExpandChart(as)
	if err != nil {
		r.recorder.Event(as, core.EventTypeWarning, "Failure", err.Error())
		r.setFailed(err, retry)
		return
	}
	r.setPhase(apps.ChartAssignmentPhaseUpdating)
	r.recorder.Event(as, core.EventTypeNormal, "UpdateChart", "update chart")
	opts := &synk.ApplyOptions{
		Namespace:        as.Spec.NamespaceName,
		EnforceNamespace: true,
		// Log only non-success applications of individual resources.
		Log: func(r *unstructured.Unstructured, action apps.ResourceAction, status, msg string) {
			if status == synk.StatusSuccess {
				return
			}
			// Resource is meant to be human-readable
			// We should not use 'Message' as key to prevent collisions between the
			// log message and its arguments.
			slog.Warn("Error applying resource",
				slog.String("Status", strings.ToUpper(status)),
				slog.String("Action", string(action)),
				slog.String("Resource", fmt.Sprintf("%s/%s %s", r.GetAPIVersion(), r.GetKind(), r.GetName())),
				slog.String("Note", msg))
		},
	}
	// Continue a remote trace if the annotation carries a hex trace id.
	spanContext := trace.SpanContext{}
	if tid, found := as.GetAnnotations()["cloudrobotics.com/trace-id"]; found {
		if _, err := hex.Decode(spanContext.TraceID[:], []byte(tid)); err != nil {
			slog.Error("decoding TraceID", slog.String("TraceID", tid), ilog.Err(err))
		}
	}
	ctx, span := trace.StartSpanWithRemoteParent(context.Background(), "Apply "+as.Name, spanContext)
	_, err = r.synk.Apply(ctx, as.Name, opts, resources...)
	span.End()
	if err != nil {
		r.recorder.Event(as, core.EventTypeWarning, "Failure", err.Error())
		r.setFailed(err, synk.IsTransientErr(err))
		return
	}
	r.recorder.Event(as, core.EventTypeNormal, "Success", "chart updated successfully")
	r.setPhase(apps.ChartAssignmentPhaseSettled)
}
// loadAndExpandChart loads the assignment's chart, renders its templates, and
// decodes the manifests into unstructured resources. The second return value
// indicates whether a failure should be retried (chart retrieval may fail
// transiently; rendering and decoding failures are permanent).
func loadAndExpandChart(as *apps.ChartAssignment) ([]*unstructured.Unstructured, bool, error) {
	c, values, err := loadChart(&as.Spec.Chart)
	if err != nil {
		return nil, true, err
	}
	// Expand chart.
	renderOpts := renderutil.Options{
		ReleaseOptions: chartutil.ReleaseOptions{
			Name:      as.Name,
			Namespace: as.Spec.NamespaceName,
			IsInstall: true,
		},
	}
	manifests, err := renderutil.Render(c, &chart.Config{Raw: values}, renderOpts)
	if err != nil {
		return nil, false, errors.Wrap(err, "render chart")
	}
	// TODO: consider giving the synk package first-class support for raw manifests
	// so that their decoding errors are fully surfaced in the ResourceSet. Otherwise,
	// common YAML errors will only be surfaced one-by-one, which is tedious to handle.
	decoded, err := decodeManifests(manifests)
	if err != nil {
		return nil, false, err
	}
	return decoded, false, nil
}
// loadChart loads the chart referenced by cspec — either an inline
// base64-encoded tarball or a name/version in a remote repository — verifies
// that declared dependencies are packaged, and returns the chart together
// with the merged YAML values (chart defaults overridden by the
// ChartAssignment's values).
func loadChart(cspec *apps.AssignedChart) (*chart.Chart, string, error) {
	var archive io.Reader
	var err error
	if cspec.Inline != "" {
		// Inline charts are carried as base64-encoded tar archives.
		archive = base64.NewDecoder(base64.StdEncoding, strings.NewReader(cspec.Inline))
	} else {
		archive, err = fetchChartTar(cspec.Repository, cspec.Name, cspec.Version)
		if err != nil {
			return nil, "", errors.Wrap(err, "retrieve chart")
		}
	}
	c, err := chartutil.LoadArchive(archive)
	if err != nil {
		return nil, "", errors.Wrap(err, "load chart archive")
	}
	// Ensure charts listed in requirements.yaml are actually packaged in.
	if req, err := chartutil.LoadRequirements(c); err == nil {
		if err := renderutil.CheckDependencies(c, req); err != nil {
			return nil, "", errors.Wrap(err, "check chart dependencies")
		}
	} else if err != chartutil.ErrRequirementsNotFound {
		return nil, "", errors.Wrap(err, "load chart requirements")
	}
	// TODO: handle empty c.Values, cspec.Values
	// Build the full set of values including the default ones. Even though
	// they are part of the chart, they are ignored if we don't provide
	// them explicitly.
	vals, err := chartutil.ReadValues([]byte(c.Values.Raw))
	if err != nil {
		return nil, "", errors.Wrap(err, "reading chart values")
	}
	vals.MergeInto(chartutil.Values(cspec.Values)) // ChartAssignment values.
	valsRaw, err := vals.YAML()
	if err != nil {
		return nil, "", errors.Wrap(err, "encode values")
	}
	return c, valsRaw, nil
}
// fetchChartTar downloads the chart tarball for name/version from the Helm
// repository at repoURL (with signature verification when possible) and
// returns it as an in-memory reader.
func fetchChartTar(repoURL, name, version string) (io.Reader, error) {
	c := downloader.ChartDownloader{
		Getters: getter.Providers{
			{Schemes: []string{"http", "https"}, New: newHTTPGetter},
		},
		HelmHome: helmpath.Home(os.ExpandEnv("$HOME/.helm")),
		Out:      os.Stderr,
		Keyring:  os.ExpandEnv("$HOME/.gnupg/pubring.gpg"),
		Verify:   downloader.VerifyIfPossible,
	}
	// Download into a temp dir that is removed before returning; the tarball
	// is read fully into memory below.
	dir, err := os.MkdirTemp("", name)
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(dir)
	// This is only called when we fetch by repo URL rather than simply
	// by e.g. stable/postgresql.
	chartURL, err := repo.FindChartInRepoURL(repoURL, name, version, "", "", "", c.Getters)
	if err != nil {
		return nil, err
	}
	// NOTE(fabxc): Since we provide a full chartURL, DownloadTo will pull from exactly
	// that URL. It will however check for repos in $HOME/.helm to determine
	// whether it should do this with special certificates for that domain.
	// (The same cert files we left blank above.)
	// We might just want to implement this ourselves once we know what auth
	// strategies we want to support and how users can configure them.
	filename, _, err := c.DownloadTo(chartURL, version, dir)
	if err != nil {
		return nil, err
	}
	b, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(b), nil
}
// newHTTPGetter returns a Helm chart getter for HTTP(S) repositories.
func newHTTPGetter(url, certFile, keyFile, caFile string) (getter.Getter, error) {
	return getter.NewHTTPGetter(url, certFile, keyFile, caFile)
}
// decodeManifests parses rendered chart templates into unstructured
// Kubernetes resources, skipping files that are not JSON or YAML.
func decodeManifests(manifests map[string]string) ([]*unstructured.Unstructured, error) {
	var out []*unstructured.Unstructured
	for fname, content := range manifests {
		// Sometimes README.md or NOTES.txt files make it into the template directory.
		// Filter files by extension.
		ext := filepath.Ext(fname)
		if ext != ".json" && ext != ".yml" && ext != ".yaml" {
			continue
		}
		res := resource.NewLocalBuilder().
			ContinueOnError().
			Unstructured().
			Stream(bytes.NewBufferString(content), fname).
			Flatten().
			Do()
		if err := res.Err(); err != nil {
			return nil, fmt.Errorf("get manifest: %w", err)
		}
		infos, err := res.Infos()
		if err != nil {
			return nil, fmt.Errorf("get file information: %w", err)
		}
		for _, info := range infos {
			out = append(out, info.Object.(*unstructured.Unstructured))
		}
	}
	return out, nil
}
================================================
FILE: src/go/pkg/controller/chartassignment/release_test.go
================================================
package chartassignment
import (
"testing"
"github.com/golang/mock/gomock"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/kubetest"
"k8s.io/client-go/tools/record"
"k8s.io/helm/pkg/chartutil"
)
const (
	// ChartName is the name used for the inline test charts built in this file.
	ChartName = "testchart"
)
// verifyValues fails the test if the YAML encoding of wantValues does not
// exactly match the rendered values string in have.
func verifyValues(t *testing.T, have string, wantValues chartutil.Values) {
	want, err := wantValues.YAML()
	if err != nil {
		t.Fatal(err)
	}
	if want != have {
		t.Fatalf("config values do not match: want\n%s\n\ngot\n%s\n", want, have)
	}
}
// Test_loadChart_mergesValues checks that values from the ChartAssignment
// spec are merged over the chart's default values.yaml, with spec values
// taking precedence (bar1: 4 overrides the chart default of 3) while
// untouched chart defaults (foo1.baz1) are preserved.
func Test_loadChart_mergesValues(t *testing.T) {
	var as apps.ChartAssignment
	unmarshalYAML(t, &as, `
metadata:
  name: test-assignment-1
spec:
  chart:
    values:
      bar1: 4
      bar2:
        baz2: test
`)
	// Build an inline chart with default values only; templates are not
	// needed for value merging.
	as.Spec.Chart.Inline = kubetest.BuildInlineChart(t, ChartName /*template=*/, "", `
foo1:
  baz1: "hello"
bar1: 3`)
	wantValues := chartutil.Values{
		"bar1": 4,
		"bar2": chartutil.Values{"baz2": "test"},
		"foo1": chartutil.Values{"baz1": "hello"},
	}
	_, vals, err := loadChart(&as.Spec.Chart)
	if err != nil {
		t.Fatal(err)
	}
	verifyValues(t, vals, wantValues)
}
// Test_loadChartWithoutTemplates_returnsZeroManifests checks that expanding
// a chart that has no template files yields no resources rather than an error.
func Test_loadChartWithoutTemplates_returnsZeroManifests(t *testing.T) {
	var as apps.ChartAssignment
	unmarshalYAML(t, &as, `
metadata:
  name: test-assignment-1
spec:
  chart:
    values:
`)
	// Inline chart with values but an empty template string.
	as.Spec.Chart.Inline = kubetest.BuildInlineChart(t, ChartName /*template=*/, "", `foo: 1`)
	resources, _, err := loadAndExpandChart(&as)
	if err != nil {
		t.Fatal(err)
	}
	if len(resources) > 0 {
		t.Errorf("Expected no resources, got %d", len(resources))
	}
}
// Test_updateSynk_callsApply checks that updating a release applies the
// chart exactly once through the synk interface, using the assignment
// name as the release name.
func Test_updateSynk_callsApply(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	var as apps.ChartAssignment
	unmarshalYAML(t, &as, `
metadata:
  name: test-assignment-1
spec:
  chart:
    values:
`)
	as.Spec.Chart.Inline = kubetest.BuildInlineChart(t, ChartName /*template=*/, "", `foo: 1`)
	mockSynk := NewMockInterface(ctrl)
	r := &release{
		synk:     mockSynk,
		recorder: &record.FakeRecorder{},
	}
	rs := &apps.ResourceSet{}
	// Expect exactly one Apply with the assignment name as release name.
	mockSynk.EXPECT().Apply(gomock.Any(), "test-assignment-1", gomock.Any(), gomock.Any()).Return(rs, nil).Times(1)
	// First apply, the chart should be installed.
	r.update(&as)
}
// Test_deleteSynk_callsDelete checks that deleting a ChartAssignment
// deletes the synk release of the same name exactly once.
func Test_deleteSynk_callsDelete(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	var as apps.ChartAssignment
	unmarshalYAML(t, &as, `
metadata:
  name: test-assignment-1
spec:
  chart:
    values:
`)
	as.Spec.Chart.Inline = kubetest.BuildInlineChart(t, ChartName /*template=*/, "", `foo: 1`)
	mockSynk := NewMockInterface(ctrl)
	r := &release{
		synk:     mockSynk,
		recorder: &record.FakeRecorder{},
	}
	mockSynk.EXPECT().Delete(gomock.Any(), "test-assignment-1").Return(nil).Times(1)
	// Deleting the assignment must delete the synk release of the same name.
	r.delete(&as)
}
================================================
FILE: src/go/pkg/controller/chartassignment/validator.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chartassignment
import (
"context"
"fmt"
"net/http"
"strings"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// NewValidationWebhook returns a new webhook that validates ChartAssignments.
// The manager's scheme is used to decode admission request payloads.
func NewValidationWebhook(mgr manager.Manager) *admission.Webhook {
	return &admission.Webhook{Handler: newChartAssignmentValidator(mgr.GetScheme())}
}
// chartAssignmentValidator implements a validation webhook.
type chartAssignmentValidator struct {
	// decoder deserializes raw admission payloads into ChartAssignments.
	decoder runtime.Decoder
}
// newChartAssignmentValidator creates a validator that decodes objects
// using the types registered in the given scheme.
func newChartAssignmentValidator(sc *runtime.Scheme) *chartAssignmentValidator {
	return &chartAssignmentValidator{
		decoder: serializer.NewCodecFactory(sc).UniversalDeserializer(),
	}
}
// Handle decodes the current (and, on updates, the previous) ChartAssignment
// from the admission request and allows or denies the request based on
// validate.
func (v *chartAssignmentValidator) Handle(_ context.Context, req admission.Request) admission.Response {
	cur := &apps.ChartAssignment{}
	if err := runtime.DecodeInto(v.decoder, req.AdmissionRequest.Object.Raw, cur); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	// OldObject is only populated on update requests; leave old nil on creation.
	var old *apps.ChartAssignment
	if raw := req.AdmissionRequest.OldObject.Raw; len(raw) > 0 {
		old = &apps.ChartAssignment{}
		if err := runtime.DecodeInto(v.decoder, raw, old); err != nil {
			return admission.Errored(http.StatusBadRequest, err)
		}
	}
	if err := v.validate(cur, old); err != nil {
		return admission.Denied(err.Error())
	}
	return admission.Allowed("")
}
// validate checks a ChartAssignment for structural errors. old is the
// previous version of the resource on updates and nil on creation.
// It returns nil if the assignment is valid.
func (v *chartAssignmentValidator) validate(cur, old *apps.ChartAssignment) error {
	if cur.Spec.NamespaceName == "" {
		return fmt.Errorf("namespace name missing")
	}
	errs := validation.ValidateNamespaceName(cur.Spec.NamespaceName, false)
	if len(errs) > 0 {
		return fmt.Errorf("invalid namespace name %q: %s", cur.Spec.NamespaceName, strings.Join(errs, ", "))
	}
	// The target namespace is immutable once the assignment exists.
	if old != nil {
		if cur.Spec.NamespaceName != old.Spec.NamespaceName {
			return fmt.Errorf("target namespace name must not be changed")
		}
	}
	c := cur.Spec.Chart
	if c.Inline != "" {
		// Inline charts carry their own content; a repository reference
		// alongside them would be ambiguous.
		if c.Repository != "" || c.Name != "" {
			return fmt.Errorf("chart repository and name must be empty for inline charts")
		}
	} else if c.Repository == "" || c.Name == "" || c.Version == "" {
		return fmt.Errorf("non-inline chart must be fully specified")
	}
	return nil
}
================================================
FILE: src/go/pkg/controller/chartassignment/validator_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chartassignment
import (
"strings"
"testing"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"sigs.k8s.io/yaml"
)
// unmarshalYAML decodes the YAML document s into v, failing the test on
// parse errors. Surrounding whitespace is stripped so tests can use
// raw-string literals.
func unmarshalYAML(t *testing.T, v interface{}, s string) {
	t.Helper()
	trimmed := strings.TrimSpace(s)
	err := yaml.Unmarshal([]byte(trimmed), v)
	if err != nil {
		t.Fatal(err)
	}
}
// TestValidate exercises chartAssignmentValidator.validate with valid and
// invalid ChartAssignments, covering both creation (old == nil) and updates.
func TestValidate(t *testing.T) {
	cases := []struct {
		name       string
		old        string
		cur        string
		shouldFail bool
	}{
		{
			name: "valid-with-inline-chart",
			cur: `
spec:
  namespaceName: ns1
  chart:
    inline: abc
    values:
      a: 2
      b: {c: 3}
`,
		},
		{
			name: "valid-with-reference-chart",
			cur: `
spec:
  namespaceName: ns1
  chart:
    repository: https://some.repo
    name: chartname
    version: 1.3.4
    values:
      a: 2
      b: {c: 3}
`,
		},
		{
			name: "missing-namespace-name",
			cur: `
spec:
  chart:
    inline: abc
`,
			shouldFail: true,
		},
		{
			name: "invalid-namespace-name",
			cur: `
spec:
  namespaceName: ns1%2
  chart:
    inline: abc
`,
			shouldFail: true,
		},
		{
			// Use a valid namespace name so this case fails on the
			// incomplete chart reference, not on namespace validation.
			name: "invalid-partial-reference",
			cur: `
spec:
  namespaceName: ns1
  chart:
    name: chartname
    version: 1.3.4
`,
			shouldFail: true,
		},
		{
			// Use a valid namespace name so this case fails on the
			// inline/reference conflict, not on namespace validation.
			name: "invalid-inline-and-reference-chart",
			cur: `
spec:
  namespaceName: ns1
  chart:
    inline: abc
    repository: https://some.repo
    name: chartname
    version: 1.3.4
`,
			shouldFail: true,
		},
		{
			name: "namespace-name-changed",
			old: `
spec:
  namespaceName: ns1
  chart:
    inline: abc
`,
			cur: `
spec:
  namespaceName: ns2
  chart:
    inline: abc
`,
			shouldFail: true,
		},
	}
	for _, c := range cases {
		// The decoder is unused by validate, so a nil scheme is fine here.
		v := newChartAssignmentValidator(nil)
		t.Run(c.name, func(t *testing.T) {
			var old, cur *apps.ChartAssignment
			if c.old != "" {
				old = &apps.ChartAssignment{}
				unmarshalYAML(t, &old, c.old)
			}
			if c.cur != "" {
				cur = &apps.ChartAssignment{}
				unmarshalYAML(t, &cur, c.cur)
			}
			err := v.validate(cur, old)
			if err == nil && c.shouldFail {
				t.Fatal("expected failure but got none")
			}
			if err != nil && !c.shouldFail {
				t.Fatalf("unexpected error: %s", err)
			}
		})
	}
}
================================================
FILE: src/go/pkg/gcr/BUILD.bazel
================================================
# Build rules for the GCR credential refresher library and its unit test.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "go_default_library",
    srcs = ["update_gcr_credentials.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/gcr",
    deps = [
        "//src/go/pkg/kubeutils:go_default_library",
        "//src/go/pkg/robotauth:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["update_gcr_credential_test.go"],
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
    deps = ["@com_github_onsi_gomega//:go_default_library"],
)
================================================
FILE: src/go/pkg/gcr/update_gcr_credential_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcr
import (
"testing"
. "github.com/onsi/gomega"
)
// TestDockercfgJSON checks that the generated dockercfg registers the token
// for all four GCR endpoints, with the auth field holding
// base64("oauth2accesstoken:<token>").
func TestDockercfgJSON(t *testing.T) {
	g := NewGomegaWithT(t)
	expectedJSON := `{
"https://gcr.io":{"username":"oauth2accesstoken","password":"ya29.yaddayadda","email":"not@val.id","auth":"b2F1dGgyYWNjZXNzdG9rZW46eWEyOS55YWRkYXlhZGRh"},
"https://asia.gcr.io":{"username":"oauth2accesstoken","password":"ya29.yaddayadda","email":"not@val.id","auth":"b2F1dGgyYWNjZXNzdG9rZW46eWEyOS55YWRkYXlhZGRh"},
"https://eu.gcr.io":{"username":"oauth2accesstoken","password":"ya29.yaddayadda","email":"not@val.id","auth":"b2F1dGgyYWNjZXNzdG9rZW46eWEyOS55YWRkYXlhZGRh"},
"https://us.gcr.io":{"username":"oauth2accesstoken","password":"ya29.yaddayadda","email":"not@val.id","auth":"b2F1dGgyYWNjZXNzdG9rZW46eWEyOS55YWRkYXlhZGRh"}
}`
	gotJSON := DockerCfgJSON("ya29.yaddayadda")
	g.Expect(gotJSON).To(MatchJSON(expectedJSON))
}
================================================
FILE: src/go/pkg/gcr/update_gcr_credentials.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Library for updating the token used to pull images from GCR in the surrounding cluster.
*/
package gcr
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"os"
"time"
"github.com/cenkalti/backoff"
"github.com/googlecloudrobotics/core/src/go/pkg/kubeutils"
"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
"github.com/googlecloudrobotics/ilog"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
)
// SecretName is the name of the secret that stores the GCR pull token.
const SecretName = "gcr-json-key"
// DockerCfgJSON takes a GCR access token and converts it into the JSON
// format required for k8s's docker-registry (.dockercfg) secrets. The same
// credentials are registered for all regional GCR endpoints.
func DockerCfgJSON(token string) []byte {
	type dockercfg struct {
		Username string `json:"username"`
		Password string `json:"password"`
		Email    string `json:"email"`
		// Auth is a []byte so encoding/json base64-encodes it, as
		// expected for the dockercfg "auth" field.
		Auth []byte `json:"auth"`
	}
	m := map[string]interface{}{}
	for _, r := range []string{"gcr.io", "asia.gcr.io", "eu.gcr.io", "us.gcr.io"} {
		m["https://"+r] = dockercfg{
			// GCR accepts an OAuth2 access token as the password for the
			// special "oauth2accesstoken" user.
			Username: "oauth2accesstoken",
			Password: token,
			Email:    "not@val.id",
			Auth:     []byte("oauth2accesstoken:" + token),
		}
	}
	b, err := json.Marshal(m)
	if err != nil {
		// Marshalling a map of plain structs should never fail; treat it
		// as a programming error.
		slog.Error("unexpected error marshalling dockercfg", ilog.Err(err))
		os.Exit(1)
	}
	return b
}
// patchServiceAccount applies a strategic-merge patch to the named
// ServiceAccount, retrying while the account does not exist yet.
func patchServiceAccount(ctx context.Context, k8s *kubernetes.Clientset, name string, namespace string, patchData []byte) error {
	saClient := k8s.CoreV1().ServiceAccounts(namespace)
	attempt := func() error {
		_, err := saClient.Patch(ctx, name, types.StrategicMergePatchType, patchData, metav1.PatchOptions{})
		if err == nil || k8serrors.IsNotFound(err) {
			// NotFound is returned as-is so the retry loop below keeps
			// waiting for the SA to appear; nil ends the retries.
			return err
		}
		return backoff.Permanent(fmt.Errorf("failed to apply %q: %v", patchData, err))
	}
	// Wait up to a minute for kube-controller-manager to create
	// the SA in new namespaces. I suspect there's a race condition
	// if the namespace is created and then quickly deleted
	// (b/281647304) which might cause this to time out - if we see
	// this error showing up a lot, we could check for namespace
	// deletion instead.
	return backoff.Retry(attempt, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60))
}
// UpdateGcrCredentials authenticates to the cloud cluster using the auth config given and updates
// the credentials used to pull images from GCR.
// It refreshes the source-of-truth secret in "default" first, then
// propagates the refreshed token to all other namespaces that already hold
// a copy of the secret (plus kube-system). Per-namespace failures are
// logged and aggregated into a single error at the end.
func UpdateGcrCredentials(ctx context.Context, k8s *kubernetes.Clientset, auth *robotauth.RobotAuth, gcpSaChain ...string) error {
	tokenSource := auth.CreateRobotTokenSource(ctx, gcpSaChain...)
	token, err := tokenSource.Token()
	if err != nil {
		return fmt.Errorf("failed to get token: %v", err)
	}
	// First, update the default/gcr-json-key Secret, which is the
	// source-of-truth when creating new chart namespaces.
	cfgData := map[string][]byte{".dockercfg": DockerCfgJSON(token.AccessToken)}
	if err := kubeutils.UpdateSecret(ctx, k8s, &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      SecretName,
			Namespace: "default",
			Labels: map[string]string{
				// The chart-assignment-controller looks for this label and copies
				// gcr-json-key to new namespaces, so that the pods can pull images.
				"cloudrobotics.com/copy-to-chart-namespaces": "true",
			},
		},
		Type: corev1.SecretTypeDockercfg,
		Data: cfgData,
	}); err != nil {
		return fmt.Errorf("failed to update default/%s: %v", SecretName, err)
	}
	// Tell k8s to use this key by pointing the default SA at it.
	patchData := []byte(`{"imagePullSecrets": [{"name": "` + SecretName + `"}]}`)
	if err := patchServiceAccount(ctx, k8s, "default", "default", patchData); err != nil {
		return fmt.Errorf("failed to update kubernetes service account for namespace default: %v", err)
	}
	nsList, err := k8s.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("failed to list namespaces: %v", err)
	}
	haveError := false
	for _, ns := range nsList.Items {
		if ns.DeletionTimestamp != nil {
			slog.Info("namespace is marked for deletion, skipping", slog.String("Namespace", ns.ObjectMeta.Name))
			continue
		}
		namespace := ns.ObjectMeta.Name
		if namespace == "default" {
			// Handled above.
			continue
		}
		// Only ever create secrets in a few specific, well-known namespaces. For app-* namespaces
		// the ChartAssignment controller will create the initial secret and patch the service account.
		// This avoids us putting pull secrets into eg foreign namespaces.
		s := k8s.CoreV1().Secrets(namespace)
		if _, err := s.Get(ctx, SecretName, metav1.GetOptions{}); k8serrors.IsNotFound(err) {
			if namespace != "kube-system" {
				continue
			}
		}
		// If we get here, the namespace has a secret that we need to update or
		// it is the kube-system namespace where it is okay to create the secret.
		// Create or update a secret containing a docker config with the access-token.
		err = kubeutils.UpdateSecret(ctx, k8s, &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      SecretName,
				Namespace: namespace,
			},
			Type: corev1.SecretTypeDockercfg,
			Data: cfgData,
		})
		if err != nil {
			// Keep going so other namespaces still get refreshed; report
			// a combined failure at the end.
			slog.Error("failed to update kubernetes secret",
				slog.String("Namespace", namespace),
				ilog.Err(err))
			haveError = true
			continue
		}
		// Tell k8s to use this key by pointing the default SA at it.
		err = patchServiceAccount(ctx, k8s, "default", namespace, patchData)
		if err != nil {
			slog.Error("failed to update kubernetes service account",
				slog.String("Namespace", namespace),
				ilog.Err(err))
			haveError = true
		}
	}
	if haveError {
		return fmt.Errorf("failed to update one or more namespaces")
	}
	return nil
}
================================================
FILE: src/go/pkg/kubetest/BUILD.bazel
================================================
# Build rules for the kubetest package, which provisions local kind
# clusters for integration tests.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["kubetest.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/kubetest",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/gcr:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//apps/v1:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_api//rbac/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_client_go//kubernetes/scheme:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//tools/clientcmd:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/client:go_default_library",
        "@io_k8s_sigs_kind//pkg/apis/config/defaults:go_default_library",
        "@io_k8s_sigs_kind//pkg/apis/config/v1alpha4:go_default_library",
        "@io_k8s_sigs_kind//pkg/cluster:go_default_library",
        "@io_k8s_sigs_yaml//:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/kubetest/kubetest.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package kubetest provides functionality to create local Kubernetes test
// clusters and run tests against them.
package kubetest
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"fmt"
"io"
"log/slog"
"os"
"os/exec"
"reflect"
"runtime"
"strings"
"testing"
"text/template"
"time"
"github.com/cenkalti/backoff"
crcapps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/gcr"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
kinddefaults "sigs.k8s.io/kind/pkg/apis/config/defaults"
kindconfig "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
kindcluster "sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/yaml"
)
// Environment encapsulates a set of clusters and can run tests against them.
type Environment struct {
	t   *testing.T
	cfg Config
	// scheme holds all resource types registered for client creation.
	scheme *k8sruntime.Scheme
	// Paths to the helm and synk binaries in the test runfiles.
	helmPath string
	synkPath string
	// clusters maps a configured cluster name to its runtime state.
	clusters map[string]*cluster
	// uniqCounter backs Uniq() to generate collision-free names.
	uniqCounter int
}
// Config describes the test environment to provision.
type Config struct {
	// The clusters that are provisioned for the test environment.
	Clusters []ClusterConfig
	// Registration function for additional resource types to a scheme.
	SchemeFunc func(*k8sruntime.Scheme) error
}
// ClusterConfig describes a single cluster of the test environment.
type ClusterConfig struct {
	// Name is the logical name tests use to refer to the cluster.
	Name string
}
// cluster holds the runtime state of one provisioned kind cluster.
type cluster struct {
	// genName is the unique, generated kind cluster name.
	genName string
	cfg     ClusterConfig
	kind    *kindcluster.Provider
	// kubeConfigPath points to a temporary kubeconfig for this cluster.
	kubeConfigPath string
	restCfg        *rest.Config
}
// New creates a new test environment and provisions all configured kind
// clusters concurrently. It fails the test if any cluster cannot be created.
func New(t *testing.T, cfg Config) *Environment {
	e := &Environment{
		helmPath: "../+non_module_deps+kubernetes_helm/helm",
		synkPath: "src/go/cmd/synk/synk_/synk",
		t:        t,
		cfg:      cfg,
		scheme:   k8sruntime.NewScheme(),
		clusters: map[string]*cluster{},
	}
	if cfg.SchemeFunc != nil {
		cfg.SchemeFunc(e.scheme)
	}
	scheme.AddToScheme(e.scheme)
	var g errgroup.Group
	// Setup clusters concurrently.
	for _, cfg := range cfg.Clusters {
		// Shadow the loop variable so each goroutine below captures its own
		// copy (required for correctness on Go versions before 1.22).
		cfg := cfg
		// Make name unique to avoid collisions across parallel tests.
		uniqName := fmt.Sprintf("%s-%x", cfg.Name, time.Now().UnixNano())
		t.Logf("Assigned unique name %q to cluster %q", uniqName, cfg.Name)
		cluster := &cluster{
			genName: uniqName,
			cfg:     cfg,
		}
		e.clusters[cfg.Name] = cluster
		g.Go(func() error {
			if err := setupCluster(e.synkPath, cluster); err != nil {
				// If the cluster has already been created, delete it so
				// failed runs don't leak kind containers.
				if cluster.kind != nil && os.Getenv("NO_TEARDOWN") == "" {
					cluster.kind.Delete(cfg.Name, "")
					if cluster.kubeConfigPath != "" {
						os.Remove(cluster.kubeConfigPath)
					}
				}
				return errors.Wrapf(err, "Create cluster %q", cfg.Name)
			}
			slog.Info("Created cluster", slog.String("Name", cfg.Name))
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		t.Fatal(err)
	}
	return e
}
// Ctx returns a fresh background context for environment-level operations.
func (e *Environment) Ctx() context.Context {
	return context.Background()
}
// helmValues renders a map of chart values as a comma-separated
// "key=value" list, as accepted by Helm's --set-string flag.
// Ordering follows Go's map iteration and is therefore unspecified.
func helmValues(vars map[string]string) string {
	pairs := make([]string, 0, len(vars))
	for key, val := range vars {
		pairs = append(pairs, key+"="+val)
	}
	return strings.Join(pairs, ",")
}
// InstallChartArchive installs a Helm chart from a tarball on disk into a cluster.
// Arguments are provided as a map where the keys are JSON paths.
// The chart is rendered locally with `helm template` and the resulting
// manifests are applied with synk; any failure fails the test.
func (e *Environment) InstallChartArchive(cluster, name, namespace, path string, args map[string]string) {
	c, ok := e.clusters[cluster]
	if !ok {
		e.t.Fatalf("Unknown cluster %q", cluster)
	}
	// Render the chart to manifests without installing anything yet.
	output, err := exec.Command(
		e.helmPath,
		"template",
		"--set-string", helmValues(args),
		"--name", name,
		path,
	).CombinedOutput()
	if err != nil {
		e.t.Fatalf("Synk install of %s failed: %v\nHelm output:\n%s\n", name, err, output)
	}
	// Apply the rendered manifests with synk, fed via stdin.
	cmd := exec.Command(
		e.synkPath,
		"apply",
		name,
		"--kubeconfig", c.kubeConfigPath,
		"-n", namespace,
		"-f", "-",
	)
	// Helm writes the templated manifests and errors alike to stderr.
	// So we can just take the combined output as is.
	cmd.Stdin = bytes.NewReader(output)
	if output, err = cmd.CombinedOutput(); err != nil {
		e.t.Fatalf("Synk install of %s failed: %v\nSynk output:\n%s\n", name, err, output)
	}
}
// Client returns a new client for the cluster.
func (e *Environment) Client(cluster string) client.Client {
	c, ok := e.clusters[cluster]
	if !ok {
		e.t.Fatalf("cluster with name %q does not exist", cluster)
	}
	// Named cl to avoid shadowing the controller-runtime client package.
	cl, err := client.New(c.restCfg, client.Options{Scheme: e.scheme})
	if err != nil {
		e.t.Fatalf("Create client for cluster %q: %s", cluster, err)
	}
	return cl
}
// Teardown destroys all clusters that were created for the environment.
// Setting NO_TEARDOWN in the environment skips deletion so clusters can be
// inspected after a test run.
func (e *Environment) Teardown() {
	if os.Getenv("NO_TEARDOWN") != "" {
		slog.Info("Skipping teardown")
		return
	}
	slog.Info("Tearing down...")
	for name, c := range e.clusters {
		if err := c.kind.Delete(c.genName, ""); err != nil {
			e.t.Errorf("Delete cluster %q (%q): %s", name, c.genName, err)
		} else {
			slog.Info("Deleted cluster",
				slog.String("Name", name),
				slog.String("GenName", c.genName))
		}
		// Remove the temporary kubeconfig created in setupCluster.
		if err := os.Remove(c.kubeConfigPath); err != nil {
			e.t.Errorf("Failed to delete %q: %s", c.kubeConfigPath, err)
		}
	}
}
// TestFunc is a single test case executed by Environment.Run against a Fixture.
type TestFunc func(*testing.T, *Fixture)
// Run takes a list of TestFuncs and executes them as subtests.
func (e *Environment) Run(tests ...TestFunc) {
	for _, test := range tests {
		f := e.New(test)
		e.t.Run(f.name, func(t *testing.T) {
			// Recover from panics to ensure that defer Teardown will run.
			defer func() {
				if err := recover(); err != nil {
					t.Errorf("panic: %s", err)
				}
			}()
			// Bind the subtest's *testing.T to the fixture before running it.
			f.t = t
			f.testFn(t, f)
		})
	}
}
// Uniq takes a string and makes it unique. It should be used to generate collision-free
// names for namespaces or cluster-wide resources from subtests.
// NOTE(review): the counter is not synchronized; this assumes subtests run
// sequentially — confirm before enabling t.Parallel in callers.
func (e *Environment) Uniq(s string) string {
	e.uniqCounter++
	return fmt.Sprintf("%s-%d", s, e.uniqCounter)
}
// Fixture provides functionality for a single test that is run against an environment.
type Fixture struct {
	// t is the subtest's testing handle, set by Environment.Run.
	t    *testing.T
	name string
	env  *Environment
	// testFn is the test body executed by Environment.Run.
	testFn TestFunc
}
// New creates a new Fixture for a test function. The fixture name is derived
// from the test function's symbol name via reflection.
func (env *Environment) New(testFn TestFunc) *Fixture {
	return &Fixture{
		name:   runtime.FuncForPC(reflect.ValueOf(testFn).Pointer()).Name(),
		testFn: testFn,
		env:    env,
	}
}
// Ctx returns a fresh background context for test operations.
func (f *Fixture) Ctx() context.Context {
	return context.Background()
}
// ObjectKey extracts a namespace/name key from the given object.
func (f *Fixture) ObjectKey(o client.Object) client.ObjectKey {
	return client.ObjectKeyFromObject(o)
}
// Uniq takes a string and makes it unique. It should be used to generate collision-free
// names for namespaces or cluster-wide resources from subtests.
// It delegates to the environment-wide counter.
func (f *Fixture) Uniq(s string) string {
	return f.env.Uniq(s)
}
// FromYAML expands a YAML template with the given vals and unmarshals it into dst.
// dst is typically of type *unstructured.Unstructured or a fully specified type
// for a Kubernetes resource. Any template or YAML error fails the test.
func (f *Fixture) FromYAML(tmpl string, vals, dst interface{}) {
	f.t.Helper()
	parsed, err := template.New("").Parse(tmpl)
	if err != nil {
		f.t.Fatalf("Invalid template: %s", err)
	}
	var rendered bytes.Buffer
	if err := parsed.Execute(&rendered, vals); err != nil {
		f.t.Fatalf("Execute template: %s", err)
	}
	raw := bytes.TrimSpace(rendered.Bytes())
	if err := yaml.Unmarshal(raw, dst); err != nil {
		f.t.Fatal(err)
	}
}
// BuildInlineChart creates an inline chart string with the given name,
// template and values: a base64-encoded, gzipped tarball containing
// Chart.yaml, one template file and values.yaml. Any archive error fails
// the test.
func BuildInlineChart(t *testing.T, name, tmpl, values string) string {
	t.Helper()
	chartData := fmt.Sprintf(`{name: %q, version: "0.0.1"}`, name)
	var encoded bytes.Buffer
	bw := base64.NewEncoder(base64.StdEncoding, &encoded)
	zw := gzip.NewWriter(bw)
	tw := tar.NewWriter(zw)
	// Write each chart file, failing on the first error instead of
	// silently ignoring failures for the template and values files.
	files := []struct{ path, content string }{
		{name + "/Chart.yaml", chartData},
		{name + "/templates/template.yaml", tmpl},
		{name + "/values.yaml", values},
	}
	for _, file := range files {
		if err := addFileToTar(tw, file.path, file.content); err != nil {
			t.Fatalf("Failed to add %s to tarball: %s", file.path, err)
		}
	}
	// Close innermost-first so all buffered data is flushed through the
	// gzip and base64 layers.
	if err := tw.Close(); err != nil {
		t.Fatalf("Failed to close tar writer: %s", err)
	}
	if err := zw.Close(); err != nil {
		t.Fatalf("Failed to close gzip writer: %s", err)
	}
	if err := bw.Close(); err != nil {
		t.Fatalf("Failed to close base64 encoder: %s", err)
	}
	return encoded.String()
}
func addFileToTar(tw *tar.Writer, path, content string) error {
if err := tw.WriteHeader(&tar.Header{
Name: path,
Size: int64(len(content)),
}); err != nil {
return err
}
if _, err := io.WriteString(tw, content); err != nil {
return err
}
return nil
}
// Client returns a new client for the cluster, delegating to the environment.
func (f *Fixture) Client(cluster string) client.Client {
	f.t.Helper()
	return f.env.Client(cluster)
}
// setupCluster creates a kind cluster with one control-plane and one worker
// node, writes its kubeconfig to a temporary file, installs a permissive
// RBAC binding, optionally injects GCR image pull credentials (when
// ACCESS_TOKEN is set), waits for the nodes to become schedulable, and
// finally initializes synk.
func setupCluster(synkPath string, cluster *cluster) error {
	kindcfg := &kindconfig.Cluster{
		Nodes: []kindconfig.Node{
			{
				Role:  kindconfig.ControlPlaneRole,
				Image: kinddefaults.Image,
			}, {
				Role:  kindconfig.WorkerRole,
				Image: kinddefaults.Image,
			},
		},
	}
	cluster.kind = kindcluster.NewProvider()
	// Create kubeconfig file for use by synk or the dev.
	kubeConfig, err := os.CreateTemp("", "kubeconfig-")
	if err != nil {
		return errors.Wrap(err, "create temp kubeconfig")
	}
	cluster.kubeConfigPath = kubeConfig.Name()
	if err := kubeConfig.Close(); err != nil {
		return errors.Wrap(err, "close temp kubeconfig")
	}
	if err := cluster.kind.Create(
		cluster.genName,
		kindcluster.CreateWithV1Alpha4Config(kindcfg),
		kindcluster.CreateWithKubeconfigPath(cluster.kubeConfigPath),
	); err != nil {
		return errors.Wrapf(err, "create cluster %q", cluster.genName)
	}
	kubecfgRaw, err := os.ReadFile(cluster.kubeConfigPath)
	if err != nil {
		return errors.Wrap(err, "read kube config")
	}
	kubecfg, err := clientcmd.NewClientConfigFromBytes(kubecfgRaw)
	if err != nil {
		return errors.Wrap(err, "decode kube config")
	}
	cluster.restCfg, err = kubecfg.ClientConfig()
	if err != nil {
		return errors.Wrap(err, "get rest config")
	}
	// Terminate the hint with a newline so it doesn't run into later output.
	fmt.Printf("To use the cluster, run KUBECONFIG=%s kubectl cluster-info\n", cluster.kubeConfigPath)
	// Setup permissive binding we also have in cloud and robot clusters.
	ctx := context.Background()
	c, err := client.New(cluster.restCfg, client.Options{})
	if err != nil {
		return errors.Wrap(err, "create client")
	}
	if err := c.Create(ctx, &rbac.ClusterRoleBinding{
		ObjectMeta: meta.ObjectMeta{
			Name: "permissive-binding",
		},
		RoleRef: rbac.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: []rbac.Subject{{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Group",
			Name:     "system:serviceaccounts",
		}},
	}); err != nil {
		return errors.Wrap(err, "create permissive role binding")
	}
	// Setup service account and create image pull secrets.
	if token := os.Getenv("ACCESS_TOKEN"); token != "" {
		// Use the same secret name as the GCR credential refresher would
		// on robots.
		// This makes some testing of components easier, that assume this
		// secret to exist, e.g. ChartAssignment controller.
		secret := &core.Secret{
			ObjectMeta: meta.ObjectMeta{
				Namespace: "default",
				Name:      gcr.SecretName,
			},
			Type: core.SecretTypeDockercfg,
			Data: map[string][]byte{
				".dockercfg": gcr.DockerCfgJSON(token),
			},
		}
		if err := c.Create(ctx, secret); err != nil {
			return errors.Wrap(err, "create pull secret")
		}
		if err := backoff.Retry(
			func() error {
				var sa core.ServiceAccount
				// Keyed fields keep go vet's composites check happy.
				err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "default"}, &sa)
				if k8serrors.IsNotFound(err) {
					// The default SA is created asynchronously; retry.
					return errors.New("not found")
				} else if err != nil {
					return backoff.Permanent(errors.Wrap(err, "get service account"))
				}
				sa.ImagePullSecrets = append(sa.ImagePullSecrets, core.LocalObjectReference{
					Name: gcr.SecretName,
				})
				if err = c.Update(ctx, &sa); k8serrors.IsConflict(err) {
					// Conflicts retry with a fresh read of the SA.
					return fmt.Errorf("conflict")
				} else if err != nil {
					return backoff.Permanent(errors.Wrap(err, "update service account"))
				}
				return nil
			},
			backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
		); err != nil {
			return errors.Wrap(err, "inject pull secret")
		}
	}
	// Wait for a node to be ready, by checking for node taints (incl. NotReady)
	// (context: b/128660997)
	if err := backoff.Retry(
		func() error {
			var nds core.NodeList
			if err := c.List(ctx, &nds); err != nil {
				return backoff.Permanent(err)
			}
			for _, n := range nds.Items {
				if len(n.Spec.Taints) == 0 {
					return nil
				}
			}
			return fmt.Errorf("taints not removed")
		},
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 240),
	); err != nil {
		return errors.Wrap(err, "wait for node taints to be removed")
	}
	cmd := exec.Command(
		synkPath,
		"init",
		"--kubeconfig", cluster.kubeConfigPath,
	)
	if output, err := cmd.CombinedOutput(); err != nil {
		// The command that ran is "synk init", so say so in the error.
		return errors.Errorf("init synk: %v; output:\n%s\n", err, output)
	}
	return nil
}
// DeploymentReady checks whether all desired replicas of a deployment are
// ready. It is written to be used as a backoff.Operation: transient
// not-ready states return a plain (retryable) error, while lookup failures
// are wrapped in backoff.Permanent.
//
// If the deployment does not pin a replica count (Spec.Replicas == nil),
// at least one ready replica is required.
func DeploymentReady(ctx context.Context, c client.Client, namespace, name string) error {
	var d apps.Deployment
	// Use keyed fields: unkeyed composite literals for external structs
	// break when the upstream type changes and trip `go vet`.
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &d); err != nil {
		return backoff.Permanent(errors.Wrapf(err, "get deployment %s/%s", namespace, name))
	}
	if d.Spec.Replicas == nil {
		if d.Status.ReadyReplicas <= 0 {
			return fmt.Errorf("Replicas not ready")
		}
		return nil
	} else if d.Status.ReadyReplicas != *d.Spec.Replicas {
		return fmt.Errorf("Replicas not ready")
	}
	return nil
}
// ChartAssignmentHasStatus returns a condition func that checks if a given
// ChartAssignment has reached the expected phase. Every invocation of the
// condition func refreshes the ChartAssignment in place.
func (f *Fixture) ChartAssignmentHasStatus(ca *crcapps.ChartAssignment, expected crcapps.ChartAssignmentPhase) func() error {
	cl := f.Client(ca.Spec.ClusterName)
	return func() error {
		if err := cl.Get(f.Ctx(), f.ObjectKey(ca), ca); err != nil {
			return backoff.Permanent(err)
		}
		got := ca.Status.Phase
		// The phase may go straight from Updating to Ready and skip
		// Settled; accept Ready when Settled was expected.
		if got == crcapps.ChartAssignmentPhaseReady && expected == crcapps.ChartAssignmentPhaseSettled {
			return nil
		}
		if got != expected {
			f.t.Logf("Status: %+v", ca.Status)
			return fmt.Errorf("chart status != %s", expected)
		}
		return nil
	}
}
================================================
FILE: src/go/pkg/kubeutils/BUILD.bazel
================================================
# Build rules for the kubeutils helper library (kubeconfig handling,
# secret updates, and HTTP transport helpers for the k8s relay).
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
    name = "go_default_library",
    srcs = ["kubeutils.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/kubeutils",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//tools/clientcmd:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/kubeutils/kubeutils.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubeutils
import (
"context"
"fmt"
"net/http"
"os"
"os/user"
"path/filepath"
"strings"
"github.com/cenkalti/backoff"
"github.com/pkg/errors"
"golang.org/x/oauth2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
	// LocalContext is the kubeconfig context name of a self-administered
	// (kubeadm-style) local cluster.
	LocalContext = "kubernetes-admin@kubernetes"
	// localConfig is the default kubeconfig path; "~" is expanded by ExpandUser.
	localConfig = "~/.kube/config"
	// deletionTimeoutSeconds bounds waits for resource deletion.
	// NOTE(review): not referenced in this file — confirm external/historic use
	// before removing.
	deletionTimeoutSeconds = 60
)
// Expand paths of the form "~/path" to absolute paths.
func ExpandUser(path string) string {
if path[:2] != "~/" {
return path
}
usr, _ := user.Current()
return filepath.Join(usr.HomeDir, path[2:])
}
// CloudKubernetesContextName derives the kubectl context name of the
// cloud-robotics GKE cluster from the GCP project ID and region.
func CloudKubernetesContextName(projectID, region string) string {
	// Context names follow the gcloud convention: gke_<project>_<zone>_<cluster>.
	const pattern = "gke_%s_%s-c_cloud-robotics"
	return fmt.Sprintf(pattern, projectID, region)
}
// GetCloudKubernetesContext returns the name of the cloud kubernetes context,
// derived from the GCP_PROJECT_ID and GCP_REGION environment variables.
func GetCloudKubernetesContext() (string, error) {
	project, ok := os.LookupEnv("GCP_PROJECT_ID")
	if !ok {
		return "", fmt.Errorf("GCP_PROJECT_ID environment variable is not defined")
	}
	region, ok := os.LookupEnv("GCP_REGION")
	if !ok {
		return "", fmt.Errorf("GCP_REGION environment variable is not defined")
	}
	return CloudKubernetesContextName(project, region), nil
}
// GetRobotKubernetesContext returns the name of the robot kubernetes context provided by the
// kubernetes-relay-client.
func GetRobotKubernetesContext() (string, error) {
gcpProjectID, defined := os.LookupEnv("GCP_PROJECT_ID")
if !defined {
return "", fmt.Errorf("GCP_PROJECT_ID environment variable is not defined")
}
return fmt.Sprintf("%s-robot", gcpProjectID), nil
}
// LoadOutOfClusterConfigLocal loads a local kubernetes config on the robot or
// workstation, pinned to the default kubeadm admin context (LocalContext).
func LoadOutOfClusterConfigLocal() (*rest.Config, error) {
	return LoadOutOfClusterConfig(LocalContext)
}
// LoadOutOfClusterConfig builds a client config from the user's kubeconfig
// file (~/.kube/config), pinned to the given context name.
func LoadOutOfClusterConfig(context string) (*rest.Config, error) {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	rules.ExplicitPath = ExpandUser(localConfig)
	overrides := &clientcmd.ConfigOverrides{CurrentContext: context}
	deferred := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
	return deferred.ClientConfig()
}
// PrefixingRoundtripper is a HTTP roundtripper that adds a specified prefix to
// all HTTP requests. We need to use it instead of setting APIPath because
// autogenerated and dynamic Kubernetes clients overwrite the REST config's
// APIPath.
type PrefixingRoundtripper struct {
	// Prefix is prepended to the URL path of each request unless already present.
	Prefix string
	// Base performs the actual request after the URL has been rewritten.
	Base http.RoundTripper
}
// RoundTrip rewrites the request to use HTTPS and to carry the configured
// path prefix (unless it is already there), then delegates to the base
// transport.
func (pr *PrefixingRoundtripper) RoundTrip(r *http.Request) (*http.Response, error) {
	// Avoid an extra roundtrip for the protocol upgrade
	r.URL.Scheme = "https"
	alreadyPrefixed := strings.HasPrefix(r.URL.Path, pr.Prefix+"/")
	if !alreadyPrefixed {
		r.URL.Path = pr.Prefix + r.URL.Path
	}
	return pr.Base.RoundTrip(r)
}
// BuildCloudKubernetesConfig builds a kubernetes config for authenticated
// access to the cloud project via the relay at remoteServer.
func BuildCloudKubernetesConfig(ts oauth2.TokenSource, remoteServer string) *rest.Config {
	// Wrap the transport twice: oauth2 adds the bearer token, and the
	// prefixing roundtripper routes requests through the relay path.
	wrap := func(base http.RoundTripper) http.RoundTripper {
		authed := &oauth2.Transport{Source: ts, Base: base}
		return &PrefixingRoundtripper{Prefix: "/apis/core.kubernetes", Base: authed}
	}
	return &rest.Config{
		Host:          remoteServer,
		APIPath:       "/apis",
		WrapTransport: wrap,
	}
}
// UpdateSecret (over-) writes a k8s secret.
// It fetches the live secret, overwrites its labels, annotations and data
// with the input's values, and retries write conflicts with exponential
// backoff (up to 5 retries). If the secret does not exist it is created.
func UpdateSecret(ctx context.Context, k8s kubernetes.Interface, input *corev1.Secret) error {
	s := k8s.CoreV1().Secrets(input.Namespace)
	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)
	return backoff.Retry(func() error {
		secret, err := s.Get(ctx, input.Name, metav1.GetOptions{})
		if err != nil {
			if k8serrors.IsNotFound(err) {
				// Secret absent: create it. errors.Wrap(nil, ...) is nil, so a
				// successful Create makes Retry return nil despite the
				// Permanent wrapper.
				_, err = s.Create(ctx, input, metav1.CreateOptions{})
				return backoff.Permanent(errors.Wrap(err, "create secret"))
			}
			return backoff.Permanent(errors.Wrap(err, "get secret"))
		}
		// Only the managed fields are replaced; other live fields are kept.
		secret.Labels = input.Labels
		secret.Annotations = input.Annotations
		secret.Data = input.Data
		_, err = s.Update(ctx, secret, metav1.UpdateOptions{})
		if k8serrors.IsConflict(err) {
			// Retry conflicts.
			return err
		}
		return backoff.Permanent(errors.Wrap(err, "update secret"))
	}, b)
}
================================================
FILE: src/go/pkg/robotauth/BUILD.bazel
================================================
# Build rules for the robotauth library (robot identity: robot-id.json
# handling, key creation, and token sources) and its unit tests.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = ["robotauth.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/robotauth",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/kubeutils:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
        "@org_golang_x_oauth2//jws:go_default_library",
        "@org_golang_x_oauth2//jwt:go_default_library",
    ],
)
go_test(
    name = "go_default_test",
    srcs = ["robotauth_test.go"],
    embed = [":go_default_library"],
    deps = [
        "@io_k8s_client_go//kubernetes/fake:go_default_library",
        "@org_golang_x_oauth2//jws:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/robotauth/robotauth.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The robotauth package contains the class for reading and writing the
// robot-id.json file. This file contains the id & private key of a robot
// that's connected to a Cloud project.
package robotauth
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"net/http"
"net/mail"
"os"
"path/filepath"
"strings"
"time"
"github.com/googlecloudrobotics/core/src/go/pkg/kubeutils"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
"golang.org/x/oauth2/jwt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
	// credentialsFile is the on-disk location of the robot's identity and
	// private key; "~" is expanded via kubeutils.ExpandUser.
	// TODO(ensonic): setup-dev creates a key and stores it, only for the ssh-app to read it
	credentialsFile = "~/.config/cloud-robotics/robot-id.json"
)
// RobotAuth is the identity of a robot connected to a Cloud project, as
// stored in robot-id.json.
type RobotAuth struct {
	// Robot name; used as the JWT "sub"/"prn" claims (see CreateJWT).
	RobotName string `json:"id"`
	// GCP project the robot belongs to.
	ProjectId string `json:"project_id"`
	// ID the public key is registered under with the token vendor; used as
	// the JWT "iss" claim and as device-id when publishing the key.
	PublicKeyRegistryId string `json:"public_key_registry_id"`
	// PEM-encoded RSA private key (PKCS#8 when created by CreatePrivateKey).
	PrivateKey []byte `json:"private_key"`
	// Cloud project domain used to build the token-vendor endpoint URLs.
	Domain string `json:"domain"`
}
// filename returns the expanded, absolute path of the robot-id.json
// credentials file.
func filename() string {
	return kubeutils.ExpandUser(credentialsFile)
}
// LoadFromFile loads key from json file. If keyfile is "", it tries to load
// from the default location (~/.config/cloud-robotics/robot-id.json).
func LoadFromFile(keyfile string) (*RobotAuth, error) {
	if keyfile == "" {
		keyfile = filename()
	}
	raw, err := os.ReadFile(keyfile)
	if err != nil {
		// Report the file actually read; the previous message always named
		// the default location even for custom keyfiles.
		return nil, fmt.Errorf("failed to read %v: %w", keyfile, err)
	}
	var robotAuth RobotAuth
	if err := json.Unmarshal(raw, &robotAuth); err != nil {
		return nil, fmt.Errorf("failed to parse %v: %w", keyfile, err)
	}
	return &robotAuth, nil
}
// LoadFromK8sSecret reads the robot identity from the "robot-auth" secret in
// the given namespace. The secret must carry the serialized RobotAuth under
// the "json" data key.
func LoadFromK8sSecret(ctx context.Context, clientset kubernetes.Interface, namespace string) (*RobotAuth, error) {
	secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, "robot-auth", metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	payload, ok := secret.Data["json"]
	if !ok {
		return nil, fmt.Errorf("could not find json key in secret's data")
	}
	auth := &RobotAuth{}
	if err := json.NewDecoder(bytes.NewReader(payload)).Decode(auth); err != nil {
		return nil, err
	}
	return auth, nil
}
// StoreInFile writes a newly-chosen ID to disk at the default credentials
// location, creating the parent directory (mode 0700) if needed. The file is
// written with mode 0600 since it contains a private key.
func (r *RobotAuth) StoreInFile() error {
	raw, err := json.Marshal(r)
	if err != nil {
		return fmt.Errorf("failed to serialize ID: %v", err)
	}
	file := filename()
	// filename() already expands "~", so no second ExpandUser is needed
	// (the previous call was a redundant no-op on the absolute path).
	if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
		return err
	}
	if err := os.WriteFile(file, raw, 0600); err != nil {
		return fmt.Errorf("failed to write %v: %w", file, err)
	}
	return nil
}
// StoreInK8sSecret writes the robot identity to the "robot-auth" kubernetes
// secret in the given namespace, creating or overwriting it as needed.
func (r *RobotAuth) StoreInK8sSecret(ctx context.Context, clientset kubernetes.Interface, namespace string) error {
	raw, err := json.Marshal(r)
	if err != nil {
		return fmt.Errorf("failed to serialize ID: %v", err)
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "robot-auth",
			Namespace: namespace,
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{"json": raw},
	}
	return kubeutils.UpdateSecret(ctx, clientset, secret)
}
// CreatePrivateKey generates a fresh 2048-bit RSA key and stores it,
// PEM-encoded in PKCS#8 form, in r.PrivateKey.
func (r *RobotAuth) CreatePrivateKey() error {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	der, err := x509.MarshalPKCS8PrivateKey(rsaKey)
	if err != nil {
		return err
	}
	block := pem.Block{Type: "PRIVATE KEY", Bytes: der}
	r.PrivateKey = pem.EncodeToMemory(&block)
	return nil
}
// getTokenEndpoint returns the token vendor's OAuth2 token URL for this
// robot's cloud domain.
func (r *RobotAuth) getTokenEndpoint() string {
	return fmt.Sprintf("https://%s/apis/core.token-vendor/v1/token.oauth2", r.Domain)
}
// CreateRobotTokenSource creates an OAuth2 token source for the token vendor.
// This token source returns Google Cloud access tokens minted for either the
// robot-service@ service account or, when gcpSaChain is given, impersonated
// via its first entry.
func (r *RobotAuth) CreateRobotTokenSource(ctx context.Context, gcpSaChain ...string) oauth2.TokenSource {
	cfg := jwt.Config{
		// Will be used as "issuer" of the outgoing JWT. Is not formatted as an email though
		Email:      r.PublicKeyRegistryId,
		PrivateKey: r.PrivateKey,
		TokenURL:   r.getTokenEndpoint(),
		Scopes:     []string{},
		Expires:    time.Minute * 30,
	}
	if len(gcpSaChain) > 0 {
		// TokenVendor expects single service-account email.
		// todo: consider allowing token-vendor to accept SA chain.
		cfg.Subject = gcpSaChain[0]
	}
	return cfg.TokenSource(ctx)
}
// CreateJWT allows to create a JWT for authentication against the token vendor.
// This does not grant Google Cloud access, but can be used for explicit
// authentication with the token vendor. The token is signed with the robot's
// RSA private key (RS256) and expires after the given lifetime.
func (r *RobotAuth) CreateJWT(ctx context.Context, lifetime time.Duration) (string, error) {
	block, _ := pem.Decode(r.PrivateKey)
	if block == nil {
		return "", fmt.Errorf("decode private key")
	}
	// Accept both PKCS#8 and legacy PKCS#1 encodings of the key.
	parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
			return "", fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
		}
	}
	rsaKey, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		return "", fmt.Errorf("private key is invalid")
	}
	// We re-use the token audience here.
	// While it would be nicer to use a specific token.verify endpoint here,
	// the token-vendor takes a full path it verifies.
	// This would allow this token to be used for getting an OAuth token, but
	// the token and identity endpoints use the same access protection,
	// so there's no functional difference either way.
	claims := jws.ClaimSet{
		Iss: r.PublicKeyRegistryId,
		Aud: r.getTokenEndpoint(),
		Sub: r.RobotName,
		Prn: r.RobotName,
		Exp: time.Now().Add(lifetime).Unix(),
	}
	header := jws.Header{Algorithm: "RS256", Typ: "JWT"}
	signed, err := jws.Encode(&header, &claims, rsaKey)
	if err != nil {
		return "", err
	}
	return signed, nil
}
// ServiceAccountEmail takes name of service account and returns its
// email form for given RobotAuth.
//
// Note: method will also accept SA in email form and return it AS-IS if it
// resembles GCP service account. This allows caller to use this method
// transparently for a situation where SA from different project than
// RobotAuth#ProjectId is needed for robot. This should be very rare.
func (r *RobotAuth) ServiceAccountEmail(saName string) (string, error) {
	if saName == "" {
		return "", errors.New("empty name")
	}
	_, parseErr := mail.ParseAddress(saName)
	if parseErr != nil {
		// Not an email address: treat it as a bare account name within
		// this robot's project.
		return fmt.Sprintf("%s@%s.iam.gserviceaccount.com", saName, r.ProjectId), nil
	}
	// Already an email: only accept GCP service-account addresses.
	if !strings.HasSuffix(saName, ".iam.gserviceaccount.com") {
		return "", fmt.Errorf("unexpected service account email value, %s", saName)
	}
	return saName, nil
}
// robotJWTSource gets robot JWTs from the metadata-server.
type robotJWTSource struct {
	// client performs the metadata-server HTTP requests; the zero value
	// (default client) is used in production, tests inject a transport.
	client http.Client
}
// Token gets a robot JWT from the metadata-server or returns an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
func (ts *robotJWTSource) Token() (*oauth2.Token, error) {
	const metadataURL = "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/identity"
	// NOTE(review): no "Metadata-Flavor: Google" header is sent; the on-prem
	// metadata-server apparently accepts that — confirm before reusing this
	// against a real GCE metadata endpoint.
	req, err := http.NewRequest(http.MethodGet, metadataURL, nil)
	if err != nil {
		return nil, err
	}
	resp, err := ts.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Read body before checking status to ensure connection can be reused.
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response body: %w", err)
	}
	body := string(raw)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("status code %d, body:\n%s", resp.StatusCode, body)
	}
	// Decode token so we can set the expiry and the client knows when to
	// refresh. No need to check the signature here (onprem) as it will be
	// checked by the cloud backend.
	claims, err := jws.Decode(body)
	if err != nil {
		return nil, fmt.Errorf("decode: %w", err)
	}
	token := &oauth2.Token{
		TokenType:   "Bearer",
		AccessToken: body,
		Expiry:      time.Unix(claims.Exp, 0),
	}
	return token, nil
}
const (
	// Minimum remaining lifetime of JWTs from CreateJWTSource(); tokens
	// closer than this to expiry are refreshed eagerly.
	jwtMinLifetime = time.Minute
)
// CreateJWTSource creates an OAuth2 token source for the JWTs signed by the
// robot's private key. Tokens are cached and reused until less than
// jwtMinLifetime of validity remains (see oauth2.ReuseTokenSourceWithExpiry).
func CreateJWTSource() oauth2.TokenSource {
	ts := &robotJWTSource{}
	return oauth2.ReuseTokenSourceWithExpiry(nil, ts, jwtMinLifetime)
}
================================================
FILE: src/go/pkg/robotauth/robotauth_test.go
================================================
package robotauth
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"testing/quick"
"time"
"golang.org/x/oauth2/jws"
"k8s.io/client-go/kubernetes/fake"
)
// Test keys copied from token-vendor oauth/jwt
const testPubKey = `
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvTGUksynbWhvZkHNJn8C
2oXVD400jiK4T0JoyS/SwbBGwFr3OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sM
gyld9ZYio7SQiiRV/nwYZittGf9/yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxX
vGuYG48IH0kqAQbYBI/0lAV3H5pkdXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmz
Q9+NmKvXWKATAPax1yYoESaZtc22aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard
17gywb46HHGl2XoY+Y5pihwvctsFeZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xD
pwIDAQAB
-----END PUBLIC KEY-----`
const testPrivKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvTGUksynbWhvZkHNJn8C2oXVD400jiK4T0JoyS/SwbBGwFr3
OJGlPwXCsvAPAzmpTuZpge6T3pnIcO/s97sMgyld9ZYio7SQiiRV/nwYZittGf9/
yfHSNDJUvT25yhuK2p3UqRCom1a3KljeXbxXvGuYG48IH0kqAQbYBI/0lAV3H5pk
dXPFZC6PHltC3jySVIOg7qPXrNuxdxmg/gmzQ9+NmKvXWKATAPax1yYoESaZtc22
aCZWouIdJr3baYlfBb4w8stoJPoONuyn4ard17gywb46HHGl2XoY+Y5pihwvctsF
eZXLfYwUmFPfgncQHJ02lCV3+Xyk4AAZy3xDpwIDAQABAoH/bKMLrT/W4/wT+6PN
KU3FVbWDompywyssqlZ31Q6g9pdCCTIyw0jemlG0ewtdk3yIu8WS0Aku36NudWtP
pvDBPo+CZILRYS9N0AUNXBPl7sUA4OzVdCBnk5FTF1daV7N5CA+ZDXuDVa91fduJ
1ElSF9+weCKph0170Rsc74G570Q1ypoee/gdhkwwK5aYfTs+Z6fpaEnHaPzcwYkF
4QTsCshtoGZslmgZt8Tm7sfDDFWD20fmr1s350Ne1I7VYRFiyGbQI+IB+4pc9LSX
8CHcHIzHidKYTSG6YwpDsNRN/BkQklhsuLnNacMFFddO0IHIS0GlLBJbCRkN3b/n
/XC5AoGBAPZIN3VCpSEAw6OsM1zL4CBcq2dOb5b87rAeUmSkmW415fuyUNJJBcaf
1pliCQNeg9RzRDuHOs6BTU9i+fLcbOwSapFzGxzqnv4xmkHbj1Xs52Z+97HvKKld
xlQ/TF72WGITZVwmQWxJ9Rgx+bi7OirzOtQYoNpFoF5vHgyGrUZ7AoGBAMSosXUk
uLMzrZjH4Oetp8tq9Udyk7Xkk7booU7I0iPb/Dvadsuc9WZI+LP4R3iWmtLcJOUr
WyfliCLvbWtF4aW2vo7hvffe19krg/H26WEuBTuQGCZv8B5o8xHSecb7jbrKt9g6
r8I5kr+2tAZKLC6mtFdJgfSXNO9tveBxe+XFAoGBAIwQljnCJVeXr6wuCygDavv8
uB6QpTYhsz3GgOVsFzZuwNVcnEp77SUBUnL5JlccMa1pwKx6RB+dufIkQDK22duI
vcLqy8iuRq4aV7iMvgAIM7I/E2/GrEFma50OQsjfIXTlwwedWifUB+gyw+sjz/kN
S6/EMfbxEjuixlwpW/JxAoGBAKG5dM44F6hPPFijL0J3XcD8QZ+zCuQPiKZnopgO
sDmLJF/4Za9Gccze/5/I8sWpXMNBBRptUDZ8HTtVmK8aNdm4cfdAj5/y46EVlxl6
Cyy+0tDLzAB4F4h6mEI0y66mmkRdh1jL0lQwUo1Ua7Gsd68Zqr8JlVSWsJKhtf+I
c/JdAoGAFCSDby7ByX0W23Su3R28+9lWRSmNG79kLRLzlXsCwXTUTFh/TjAaEKgK
vwi8dtCSMNnJLCUXGx5cjTndgjTl8Woah0wy9XNNeIUjI8JPxIwXmmjppPKdCBI4
0ZyqQjgPJvwfY7lxFjE10ypv99QDlEbnwngt6bvSkY+6+DQTUDw=
-----END RSA PRIVATE KEY-----
`
// Generate random RobotAuth values to ensure any secret stored in the k8s
// secret can be retrieved without modification to the value
func TestK8sSecretLoadStoreRoundtrip(t *testing.T) {
	// FWIW, this should require a better generator to get valid k8s
	// namespace names generated, and create the namespace before
	// we try to store the secret in it.
	// I can only assume the fake server is super lenient here.
	roundtrip := func(auth RobotAuth, namespace string) bool {
		clientset := fake.NewSimpleClientset()
		if err := auth.StoreInK8sSecret(context.TODO(), clientset, namespace); err != nil {
			t.Errorf("Failed to store k8s secret: %v", err)
		}
		loaded, err := LoadFromK8sSecret(context.TODO(), clientset, namespace)
		if err != nil {
			t.Errorf("Failed to read k8s secret: %v", err)
		}
		return reflect.DeepEqual(&auth, loaded)
	}
	if err := quick.Check(roundtrip, nil); err != nil {
		t.Errorf("Failed to check roundtrip of Store/Load K8sSecret: %v", err)
	}
}
// TestCreateJWT signs a token with the test private key and verifies the
// signature against the matching test public key.
func TestCreateJWT(t *testing.T) {
	auth := RobotAuth{PrivateKey: []byte(testPrivKey)}
	signed, err := auth.CreateJWT(context.TODO(), time.Minute*10)
	if err != nil {
		t.Errorf("Failed to create JWT: %v", err)
	}
	block, _ := pem.Decode([]byte(testPubKey))
	if block == nil {
		t.Fatalf("Failed to pem decode pubkey")
	}
	anyKey, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		t.Fatalf("Failed to decode public key: %v", err)
	}
	pubKey, ok := anyKey.(*rsa.PublicKey)
	if !ok {
		t.Fatalf("Failed to cast public key to rsa")
	}
	if err := jws.Verify(signed, pubKey); err != nil {
		t.Errorf("Failed to validate created JWT: %v", err)
	}
}
// mockRoundTripper is an http.RoundTripper stub that always returns the
// canned response, regardless of the request.
type mockRoundTripper struct {
	response *http.Response
}
// RoundTrip implements http.RoundTripper by returning the canned response.
func (rt *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	return rt.response, nil
}
// makeTokenResponse builds an HTTP 200 response whose body is the given
// token string, mimicking the metadata-server's identity endpoint.
func makeTokenResponse(token string) *http.Response {
	w := httptest.NewRecorder()
	w.Header().Add("Content-Type", "application/json")
	w.WriteString(token)
	return w.Result()
}
const (
	// unsigned token with known fields, check/generate with https://jwt.io and
	// a throwaway private key:
	// ssh-keygen -t rsa -b 4096 -m PEM -f jwtRS256.key
	testToken = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJyb2JvdC1yb2JvdC1zaW0iLCJhdWQiOiJodHRwczovL3d3dy5lbmRwb2ludHMubXktdGVzdC1wcm9qZWN0LmNsb3VkLmdvb2cvYXBpcy9jb3JlLnRva2VuLXZlbmRvci92MS90b2tlbi5vYXV0aDIiLCJleHAiOjE3NDE2MTU3MjEsImlhdCI6MTc0MTYxNDgxMSwic3ViIjoicm9ib3Qtc2ltIiwicHJuIjoicm9ib3Qtc2ltIn0."
	// Expiry ("exp" claim) encoded in testToken above.
	// NOTE(review): "textTokenExpiryUnix" looks like a typo for
	// "testTokenExpiryUnix" — renaming requires updating its one user below.
	textTokenExpiryUnix = 1741615721
)
// TestCreateJWTSource feeds a canned metadata-server response through
// robotJWTSource and checks the token type and decoded expiry.
func TestCreateJWTSource(t *testing.T) {
	mockRT := &mockRoundTripper{}
	ts := &robotJWTSource{
		client: http.Client{
			Transport: mockRT,
		},
	}
	mockRT.response = makeTokenResponse(testToken)
	token, err := ts.Token()
	if err != nil {
		t.Fatalf("ts.Token() failed unexpectedly: %v", err)
	}
	if want := "Bearer"; token.TokenType != want {
		t.Errorf("token.TokenType = %q, want %q", token.TokenType, want)
	}
	// Compare times with Equal: == on time.Time also compares the monotonic
	// reading and location, which is not the intent here.
	if want := time.Unix(textTokenExpiryUnix, 0); !token.Expiry.Equal(want) {
		t.Errorf("token.Expiry = %v, want %v", token.Expiry, want)
	}
}
================================================
FILE: src/go/pkg/setup/BUILD.bazel
================================================
# Build rules for the setup library (shared robot-registration helpers),
# including a gomock-generated mock of the util.Factory interface for tests.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", "gomock")
go_library(
    name = "go_default_library",
    srcs = ["setupcommon.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/setup",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/robotauth:go_default_library",
        "//src/go/pkg/setup/util:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_client_go//dynamic:go_default_library",
        "@org_golang_x_crypto//ssh/terminal:go_default_library",
    ],
)
gomock(
    name = "mock_factory",
    out = "mock_factory_test.go",
    interfaces = ["Factory"],
    library = "//src/go/pkg/setup/util:go_default_library",
    package = "setup",
    visibility = ["//visibility:public"],
)
go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "mock_factory_test.go",
        "setupcommon_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//src/go/pkg/setup/util:go_default_library",  # keep
        "@com_github_golang_mock//gomock:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/setup/setupcommon.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package setup
import (
"bytes"
"context"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"log/slog"
"net"
"net/http"
"os"
"strings"
"time"
"github.com/cenkalti/backoff"
"github.com/googlecloudrobotics/core/src/go/pkg/robotauth"
"github.com/googlecloudrobotics/core/src/go/pkg/setup/util"
"golang.org/x/crypto/ssh/terminal"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
)
// GetRobotName returns a valid robot name or an error. If the robotName parameter
// is non-empty, it checks if it is valid. If it is an empty string, the user is
// prompted to select a robot.
func GetRobotName(ctx context.Context, f util.Factory, client dynamic.ResourceInterface, robotName string) (string, error) {
	if robotName == "" {
		// Interactive selection requires a terminal on stdin.
		exitIfNotRunningInTerminal("ERROR: --robot-name not specified")
		robots, err := client.List(ctx, metav1.ListOptions{})
		if err != nil {
			return "", err
		}
		return selectRobot(f, robots.Items)
	}
	if _, err := client.Get(ctx, robotName, metav1.GetOptions{}); err != nil {
		// IsNotFound replaces the previous hand-rolled StatusError reason
		// check; it also handles 404s and wrapped errors.
		if apierrors.IsNotFound(err) {
			return "", fmt.Errorf("Robot %v not found.", robotName)
		}
		return "", err
	}
	return robotName, nil
}
// exitIfNotRunningInTerminal checks if stdin is connected to a terminal. If
// not, it prints the given message to stderr and exits with status 1.
func exitIfNotRunningInTerminal(message ...interface{}) {
	if !terminal.IsTerminal(int(os.Stdin.Fd())) {
		fmt.Fprintln(os.Stderr, message...)
		os.Exit(1)
	}
}
// selectRobot asks the user to select a robot from a list and returns its
// name. It keeps prompting until a valid 1-based index is entered, and
// errors out when the list is empty.
func selectRobot(f util.Factory, robots []unstructured.Unstructured) (string, error) {
	// Guard: with no robots the prompt below could never be answered
	// (valid range would be 1-0), leaving the user in an infinite loop.
	if len(robots) == 0 {
		return "", fmt.Errorf("no robots found")
	}
	fmt.Printf(" # %-20v %-10v %-16v\n", "Name", "Type", "Create Time")
	for i, robot := range robots {
		spec, ok := robot.Object["spec"].(map[string]interface{})
		if !ok {
			slog.Warn("unmarshaling robot failed: spec is not a map")
			continue
		}
		fmt.Printf("%3v %-20v %-10v %v\n", i+1, robot.GetName(), spec["type"], robot.GetCreationTimestamp().String())
	}
	fmt.Print("Select robot: ")
	var ix int
	for {
		var err error
		ix, err = f.ScanInt()
		if err == nil && 1 <= ix && ix <= len(robots) {
			break
		}
		fmt.Printf("Please enter a number (1-%v): ", len(robots))
	}
	return robots[ix-1].GetName(), nil
}
// newExponentialBackoff builds a retry policy starting at initialInterval,
// growing by multiplier, and capped at `retries` attempts.
// NOTE(review): MaxInterval and MaxElapsedTime are left at their zero values
// instead of the library defaults; in cenkalti/backoff a zero MaxInterval
// appears to clamp all intervals after the first to zero — confirm the
// intended retry timing before relying on exponential growth here.
func newExponentialBackoff(initialInterval time.Duration, multiplier float64, retries uint64) backoff.BackOff {
	exponentialBackoff := backoff.ExponentialBackOff{
		InitialInterval: initialInterval,
		Multiplier:      multiplier,
		Clock:           backoff.SystemClock,
	}
	// Reset initializes the current interval before first use.
	exponentialBackoff.Reset()
	return backoff.WithMaxRetries(&exponentialBackoff, retries)
}
// WaitForDNS manually resolves the domain name with retries to give a better
// error in the case of failure. This is useful to catch errors during first
// interaction with the cluster and cloud-project.
func WaitForDNS(domain string, retries uint64) error {
	slog.Info("DNS lookup", slog.String("Domain", domain))
	lookup := func() error {
		ips, err := net.LookupIP(domain)
		if err != nil {
			return err
		}
		// Check that the results contain an ipv4 addr. Initially, coredns may only
		// return ipv6 addresses in which case helm will fail.
		for _, ip := range ips {
			if ip.To4() != nil {
				return nil
			}
		}
		return fmt.Errorf("IP not found")
	}
	notify := func(_ error, _ time.Duration) {
		slog.Info("... Retry dns", slog.String("Domain", domain))
	}
	err := backoff.RetryNotify(lookup, newExponentialBackoff(time.Second, 2, retries), notify)
	if err != nil {
		return fmt.Errorf("DNS lookup for %q failed: %w", domain, err)
	}
	return nil
}
// WaitForService tests a given cloud endpoint with a HEAD request a few times.
// This lets us wait for the service to be available or error with a better
// message.
func WaitForService(client *http.Client, url string, retries uint64) error {
	slog.Info("Service probe", slog.String("URL", url))
	if err := backoff.RetryNotify(
		func() error {
			resp, err := client.Head(url)
			if err != nil {
				return err
			}
			// Close the body so the underlying connection can be reused;
			// the response was previously discarded without closing, leaking
			// connections across retries.
			resp.Body.Close()
			return nil
		},
		newExponentialBackoff(time.Second, 2, retries),
		func(_ error, _ time.Duration) {
			slog.Info("... Retry service", slog.String("URL", url))
		},
	); err != nil {
		return fmt.Errorf("service probe for %q failed: %w", url, err)
	}
	return nil
}
// PublishCredentialsToCloud registers a public-key in the cloud under the ID
// given as part of the RobotAuth struct. The registry is probed first so the
// user gets a clearer error than a failed publish would produce.
func PublishCredentialsToCloud(client *http.Client, auth *robotauth.RobotAuth, retries uint64) error {
	if len(auth.PrivateKey) == 0 {
		return fmt.Errorf("Missing key in given auth object")
	}
	if err := isKeyRegistryAvailable(auth, client, retries); err != nil {
		return fmt.Errorf("Failed to connect to cloud key registry: %w", err)
	}
	if err := publishPublicKeyToCloudRegistry(auth, client); err != nil {
		return fmt.Errorf("Failed to register key with cloud key registry: %w", err)
	}
	return nil
}
// isKeyRegistryAvailable probes the token vendor's public-key.read endpoint
// (with retries) to confirm the cloud cluster is reachable before publishing.
func isKeyRegistryAvailable(auth *robotauth.RobotAuth, client *http.Client, retries uint64) error {
	// Make sure the cloud cluster take requests
	url := fmt.Sprintf("https://%s/apis/core.token-vendor/v1/public-key.read", auth.Domain)
	if err := WaitForService(client, url, retries); err != nil {
		return fmt.Errorf("Failed to connect to the cloud cluster: %w. Please retry in 5 minutes.", err)
	}
	return nil
}
// publishPublicKeyToCloudRegistry derives the public key from auth's private
// key and POSTs it (PEM-encoded) to the token vendor's publish endpoint under
// the robot's registry ID.
func publishPublicKeyToCloudRegistry(auth *robotauth.RobotAuth, client *http.Client) error {
	pubKey, err := getPublicKey(auth.PrivateKey)
	if err != nil {
		return err
	}
	slog.Info("Publishing the robot's public key to cloud key registry")
	url := fmt.Sprintf(
		"https://%s/apis/core.token-vendor/v1/public-key.publish?device-id=%s",
		auth.Domain,
		auth.PublicKeyRegistryId)
	response, err := client.Post(
		url, "application/x-pem-file", strings.NewReader(string(pubKey)))
	if err != nil {
		return fmt.Errorf("publishing the token failed: %w", err)
	}
	// Close the body so the connection can be reused; it was previously
	// leaked on both the success and the error path.
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		responseBody := new(bytes.Buffer)
		responseBody.ReadFrom(response.Body)
		return fmt.Errorf(
			"TokenVendor responded with %d %s: %s",
			response.StatusCode,
			response.Status,
			responseBody.String())
	}
	return nil
}
func getPublicKey(privateKey []byte) ([]byte, error) {
block, _ := pem.Decode(privateKey)
if block == nil {
return nil, fmt.Errorf("Private key is not a valid PEM object")
}
var rsaKey *rsa.PrivateKey
if block.Type == "RSA PRIVATE KEY" {
key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return nil, err
}
rsaKey = key
} else if block.Type == "PRIVATE KEY" {
key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
if err != nil {
return nil, err
}
rsaKey = key.(*rsa.PrivateKey)
} else {
return nil, fmt.Errorf("Expected a private key, got %s", block.Type)
}
pubKey, err := x509.MarshalPKIXPublicKey(&rsaKey.PublicKey)
if err != nil {
return nil, err
}
return pem.EncodeToMemory(&pem.Block{
Type: "PUBLIC KEY",
Bytes: pubKey,
}), nil
}
// mergeMaps returns a new map containing `base` with `additions` added on
// top. If the same key is present in both maps, the one from `additions`
// wins. Neither input map is modified.
func mergeMaps(base, additions map[string]string) map[string]string {
	merged := make(map[string]string, len(base)+len(additions))
	// Copy base first, then additions, so later values overwrite earlier ones.
	for _, m := range []map[string]string{base, additions} {
		for k, v := range m {
			merged[k] = v
		}
	}
	return merged
}
// CreateOrUpdateRobot adds a new robot-cr or updates an existing one.
// For an existing robot, labels and annotations are merged into the current
// ones (new values win) and spec.type/spec.project are overwritten.
func CreateOrUpdateRobot(ctx context.Context, client dynamic.ResourceInterface, robotName, robotType, project string, labels map[string]string, annotations map[string]string) error {
	robot, err := client.Get(ctx, robotName, metav1.GetOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			return fmt.Errorf("Failed to get robot %v: %w", robotName, err)
		}
		// No robot with this name exists yet: create one from scratch.
		newRobot := &unstructured.Unstructured{}
		newRobot.SetKind("Robot")
		newRobot.SetAPIVersion("registry.cloudrobotics.com/v1alpha1")
		newRobot.SetName(robotName)
		newRobot.SetLabels(labels)
		newRobot.SetAnnotations(annotations)
		newRobot.Object["spec"] = map[string]interface{}{
			"type":    robotType,
			"project": project,
		}
		newRobot.Object["status"] = make(map[string]interface{})
		_, err = client.Create(ctx, newRobot, metav1.CreateOptions{})
		return err
	}
	// A robot with the same name already exists: update it in place.
	robot.SetLabels(mergeMaps(robot.GetLabels(), labels))
	robot.SetAnnotations(mergeMaps(robot.GetAnnotations(), annotations))
	spec, ok := robot.Object["spec"].(map[string]interface{})
	if !ok {
		return fmt.Errorf("unmarshaling robot failed: spec is not a map")
	}
	spec["type"] = robotType
	spec["project"] = project
	_, err = client.Update(ctx, robot, metav1.UpdateOptions{})
	return err
}
================================================
FILE: src/go/pkg/setup/setupcommon_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package setup
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/golang/mock/gomock"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// TestSelectRobot verifies that selectRobot returns the name of the robot
// chosen via the (mocked) interactive prompt.
func TestSelectRobot(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockFactory := NewMockFactory(mockCtrl)
	robot := unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "registry.cloudrobotics.com/v1alpha1",
			"kind":       "Robot",
			"metadata": map[string]interface{}{
				"namespace": "default",
				"name":      "ro-1234",
				"labels": map[string]interface{}{
					"cloudrobotics.com/robot-name": "ro-1234",
				},
			},
			"spec": map[string]interface{}{
				"type": "test",
			},
		},
	}
	// The user "types" 1 to pick the first (and only) robot.
	mockFactory.EXPECT().ScanInt().Return(1, nil).Times(1)
	id, err := selectRobot(mockFactory, []unstructured.Unstructured{robot})
	if id != "ro-1234" || err != nil {
		t.Errorf("selectRobot(mockFactory, oneRobot) = %v, %v want ro-1234, nil", id, err)
	}
}
// TestWaitForService_OkIfServiceResponds checks that WaitForService succeeds
// on the first attempt when the target HTTP server answers.
func TestWaitForService_OkIfServiceResponds(t *testing.T) {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
	server := httptest.NewServer(handler)
	defer server.Close()
	if err := WaitForService(server.Client(), server.URL, 1); err != nil {
		t.Errorf("WaitForService returned error: %v", err)
	}
}
================================================
FILE: src/go/pkg/setup/util/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

# Helper library for interactive setup: the Factory abstraction over user
# input / host inspection, plus a configurable test fake.
go_library(
    name = "go_default_library",
    srcs = [
        "factory.go",
        "fake.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/setup/util",
    visibility = ["//visibility:public"],
)
================================================
FILE: src/go/pkg/setup/util/factory.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"net"
"strconv"
"strings"
)
// Factory abstracts user input and host inspection so that interactive
// setup code can be unit-tested with fakes and mocks.
type Factory interface {
	// ScanInt reads the next whitespace-delimited token from stdin and
	// converts it to an int.
	ScanInt() (int, error)
	// GetNetworkInterfaceIP returns the IPv4 address of the first local
	// network interface whose name starts with the given prefix.
	GetNetworkInterfaceIP(string) (string, error)
}

// DefaultFactory is the production implementation of Factory.
type DefaultFactory struct{}

// NewFactory returns a new DefaultFactory.
func NewFactory() *DefaultFactory {
	return &DefaultFactory{}
}
// ScanInt reads stdin up to the next space and converts the token to an int.
func (f *DefaultFactory) ScanInt() (int, error) {
	var token string
	if _, err := fmt.Scan(&token); err != nil {
		return 0, err
	}
	// strconv.Atoi returns (0, err) on failure, matching our contract.
	return strconv.Atoi(token)
}
// GetNetworkInterfaceIP returns the IP address of the first local network
// interface whose name starts with namePrefix. Only IPv4 addresses are
// considered; an error is returned when no matching interface has one.
func (f *DefaultFactory) GetNetworkInterfaceIP(namePrefix string) (string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return "", err
	}
	for _, iface := range ifaces {
		if !strings.HasPrefix(iface.Name, namePrefix) {
			continue
		}
		addrs, err := iface.Addrs()
		if err != nil {
			return "", err
		}
		// Report the first IPv4 address assigned to this interface.
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if ok && ipnet.IP.To4() != nil {
				return ipnet.IP.String(), nil
			}
		}
	}
	return "", fmt.Errorf("Could not look up IP of interface with prefix: %v", namePrefix)
}
================================================
FILE: src/go/pkg/setup/util/fake.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
// TestFactory is a test double whose ScanInt behavior is supplied by the
// test via ScanIntImpl.
type TestFactory struct {
	// ScanIntImpl is invoked by ScanInt; tests must set it before use,
	// otherwise ScanInt calls a nil function.
	ScanIntImpl func() (int, error)
}

// NewTestFactory returns a TestFactory with no behavior configured.
func NewTestFactory() *TestFactory {
	return &TestFactory{}
}

// ScanInt delegates to the configured ScanIntImpl.
func (f *TestFactory) ScanInt() (int, error) {
	return f.ScanIntImpl()
}
================================================
FILE: src/go/pkg/synk/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

# Library for synchronizing batches of Kubernetes resources (ResourceSets)
# with a cluster, including CRD handling and deletions.
go_library(
    name = "go_default_library",
    srcs = [
        "interface.go",
        "sort.go",
        "synk.go",
    ],
    importpath = "github.com/googlecloudrobotics/core/src/go/pkg/synk",
    visibility = ["//visibility:public"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/api/meta:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_apimachinery//pkg/util/jsonmergepatch:go_default_library",
        "@io_k8s_apimachinery//pkg/util/mergepatch:go_default_library",
        "@io_k8s_apimachinery//pkg/util/strategicpatch:go_default_library",
        "@io_k8s_client_go//discovery:go_default_library",
        "@io_k8s_client_go//discovery/cached:go_default_library",
        "@io_k8s_client_go//dynamic:go_default_library",
        "@io_k8s_client_go//kubernetes/scheme:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//restmapper:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

# Unit tests run against fake dynamic/discovery clients.
go_test(
    name = "go_default_test",
    srcs = [
        "sort_test.go",
        "synk_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/api/meta/testrestmapper:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_apimachinery//pkg/types:go_default_library",
        "@io_k8s_client_go//discovery:go_default_library",
        "@io_k8s_client_go//dynamic/fake:go_default_library",
        "@io_k8s_client_go//kubernetes/scheme:go_default_library",
        "@io_k8s_client_go//testing:go_default_library",
        "@io_k8s_sigs_yaml//:go_default_library",
    ],
)
================================================
FILE: src/go/pkg/synk/interface.go
================================================
package synk
import (
"context"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// Interface is the programmatic API of synk: installing the ResourceSet
// CRD, applying named sets of resources, and deleting them again.
type Interface interface {
	// Init installs the ResourceSet CRD into the cluster and waits for
	// it to become available.
	Init() error
	// Delete marks the ResourceSet with the given name for deletion.
	Delete(ctx context.Context, name string) error
	// Apply installs or updates the ResourceSet specified by 'name' with
	// the given resources.
	Apply(ctx context.Context, name string, opts *ApplyOptions, resources ...*unstructured.Unstructured) (*apps.ResourceSet, error)
}
================================================
FILE: src/go/pkg/synk/sort.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package synk
import (
"fmt"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// gvknn is only used to unify the less functions.
type gvknn struct {
	priority  int
	group     string
	version   string
	kind      string
	namespace string
	name      string
}

// kindPriority maps kinds that must be applied early to their sort
// priority; all other kinds share the default priority 999.
var kindPriority = map[string]int{
	// Adding resources to a non existing namespace removes them. So
	// namespaces need to go early.
	"Namespace": 1,
	// We need ServiceAccount to be before Secret. The token controller
	// removes Secrets with non existing ServiceAccount.
	"ServiceAccount": 2,
	"Secret":         3,
}

// newGvknn builds a gvknn whose apply priority is derived from the kind.
func newGvknn(group, version, kind, namespace, name string) *gvknn {
	priority, ok := kindPriority[kind]
	if !ok {
		priority = 999
	}
	return &gvknn{priority, group, version, kind, namespace, name}
}

// less orders two gvknn values by priority first, then by group, version,
// kind, namespace and name.
func less(l, r *gvknn) bool {
	key := func(g *gvknn) string {
		return fmt.Sprintf("%03d/%s/%s/%s/%s/%s", g.priority, g.group, g.version, g.kind, g.namespace, g.name)
	}
	return key(l) < key(r)
}
// gvknnUnstructured derives the sort key of an unstructured resource.
func gvknnUnstructured(u *unstructured.Unstructured) *gvknn {
	gvk := u.GroupVersionKind()
	return newGvknn(gvk.Group, gvk.Version, gvk.Kind, u.GetNamespace(), u.GetName())
}

// gvknnRSpecG derives the sort key of a ResourceSet spec group; groups
// carry no namespace or name.
func gvknnRSpecG(r *apps.ResourceSetSpecGroup) *gvknn {
	return newGvknn(r.Group, r.Version, r.Kind, "", "")
}

// gvknnRStatusG derives the sort key of a ResourceSet status group.
func gvknnRStatusG(r *apps.ResourceSetStatusGroup) *gvknn {
	return newGvknn(r.Group, r.Version, r.Kind, "", "")
}

// lessUnstructured orders two unstructured resources by their gvknn key.
func lessUnstructured(l, r *unstructured.Unstructured) bool {
	return less(gvknnUnstructured(l), gvknnUnstructured(r))
}

// lessResourceSetSpecGroup orders two ResourceSet spec groups.
func lessResourceSetSpecGroup(l, r *apps.ResourceSetSpecGroup) bool {
	return less(gvknnRSpecG(l), gvknnRSpecG(r))
}

// lessResourceSetStatusGroup orders two ResourceSet status groups.
func lessResourceSetStatusGroup(l, r *apps.ResourceSetStatusGroup) bool {
	return less(gvknnRStatusG(l), gvknnRStatusG(r))
}
================================================
FILE: src/go/pkg/synk/sort_test.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package synk
import (
"testing"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
)
// TestLessResourceSetStatusGroup checks strict ordering on the Kind field.
func TestLessResourceSetStatusGroup(t *testing.T) {
	lo := &apps.ResourceSetStatusGroup{Group: "g", Version: "v", Kind: "k"}
	hi := &apps.ResourceSetStatusGroup{Group: "g", Version: "v", Kind: "l"}
	if !lessResourceSetStatusGroup(lo, hi) {
		t.Errorf("expected a < b")
	}
	if lessResourceSetStatusGroup(hi, lo) {
		t.Errorf("expected b >= a")
	}
}
// TestLessResourceSetSpecGroup checks strict ordering on the Kind field.
func TestLessResourceSetSpecGroup(t *testing.T) {
	lo := &apps.ResourceSetSpecGroup{Group: "g", Version: "v", Kind: "k"}
	hi := &apps.ResourceSetSpecGroup{Group: "g", Version: "v", Kind: "l"}
	if !lessResourceSetSpecGroup(lo, hi) {
		t.Errorf("expected a < b")
	}
	if lessResourceSetSpecGroup(hi, lo) {
		t.Errorf("expected b >= a")
	}
}
// TestLessUnstructured checks ordering of unstructured resources by name.
func TestLessUnstructured(t *testing.T) {
	lo := newUnstructured("v1", "Secret", "ns1", "pod")
	hi := newUnstructured("v1", "Secret", "ns1", "poe")
	if !lessUnstructured(lo, hi) {
		t.Errorf("expected a < b")
	}
	if lessUnstructured(hi, lo) {
		t.Errorf("expected b >= a")
	}
}
// TestLess checks that every gvknn field participates in the ordering and
// that kind priorities (e.g. ServiceAccount before Secret) take precedence
// over lexicographic order.
func TestLess(t *testing.T) {
	cases := []struct {
		a, b *gvknn
	}{
		{newGvknn("a", "v", "k", "ns", "n"), newGvknn("b", "v", "k", "ns", "n")},
		{newGvknn("g", "a", "k", "ns", "n"), newGvknn("g", "b", "k", "ns", "n")},
		{newGvknn("g", "v", "a", "ns", "n"), newGvknn("g", "v", "b", "ns", "n")},
		{newGvknn("g", "v", "k", "a", "n"), newGvknn("g", "v", "k", "b", "n")},
		{newGvknn("g", "v", "k", "ns", "a"), newGvknn("g", "v", "k", "ns", "b")},
		{newGvknn("g", "v", "ServiceAccount", "ns", "a"), newGvknn("g", "v", "Secret", "ns", "b")},
		{newGvknn("g", "v", "Secret", "ns", "a"), newGvknn("g", "v", "ServiceAccount2", "ns", "b")},
	}
	for _, tc := range cases {
		if !less(tc.a, tc.b) {
			t.Errorf("expected a (%v) < b (%v)", tc.a, tc.b)
		}
		if less(tc.b, tc.a) {
			t.Errorf("expected b (%v) >= a (%v)", tc.b, tc.a)
		}
	}
}
================================================
FILE: src/go/pkg/synk/synk.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package synk contains functionality to synchronize a batch of resources
// with a cluster while correctly handling CRDs and deletions.
package synk
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/cenkalti/backoff"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/ilog"
"github.com/pkg/errors"
"go.opencensus.io/trace"
corev1 "k8s.io/api/core/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/jsonmergepatch"
"k8s.io/apimachinery/pkg/util/mergepatch"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
)
// totalAnnotationSizeLimitB mirrors the apiserver's limit on the combined
// size of all annotations on one object; see
// src/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
// Synk allows to synchronize sets of resources with a fixed cluster.
type Synk struct {
	discovery discovery.CachedDiscoveryInterface
	client    dynamic.Interface
	// mapper resolves GroupVersionKinds to REST resources.
	mapper meta.RESTMapper
	// resetMapper invalidates the mapper's cached discovery data. It is
	// stored separately to allow reasonable tests.
	resetMapper func()
}
// New returns a new Synk object that acts against the cluster for the given
// configuration.
func New(client dynamic.Interface, discovery discovery.CachedDiscoveryInterface) *Synk {
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(discovery)
	// Store the reset function separately to allow reasonable tests.
	return &Synk{
		discovery:   discovery,
		client:      client,
		mapper:      mapper,
		resetMapper: mapper.Reset,
	}
}
// NewForConfig builds a Synk from a Kubernetes client configuration, backed
// by a memory-cached discovery client.
func NewForConfig(cfg *rest.Config) (*Synk, error) {
	dynClient, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err
	}
	cachedDiscovery := cacheddiscovery.NewMemCacheClient(discoveryClient)
	// Without initial invalidation all calls will fail.
	cachedDiscovery.Invalidate()
	return New(dynClient, cachedDiscovery), nil
}
// TODO: determine options that allow us to be semantically compatible with
// vanilla kubectl apply.
//
// ApplyOptions configures a single Apply run. The unexported name and
// version identify the ResourceSet the run creates; they are filled in by
// Apply/initialize.
type ApplyOptions struct {
	name    string
	version int32
	// Namespace that's set for all namespaced resources that have no
	// other namespace set yet.
	Namespace string
	// EnforceNamespace causes apply to fail if a resource has a namespace set
	// that's different from Namespace.
	EnforceNamespace bool
	// Log functions to report progress and failures while applying resources.
	Log func(r *unstructured.Unstructured, a apps.ResourceAction, status, msg string)
}

// Status strings passed to ApplyOptions.Log for each applied resource.
const (
	StatusSuccess = "success"
	StatusFailure = "failure"
)
// logf reports a successful action on resource r via the configured Log
// callback, if any.
func (o *ApplyOptions) logf(r *unstructured.Unstructured, action apps.ResourceAction, msg string, args ...interface{}) {
	if o.Log == nil {
		return
	}
	o.Log(r, action, StatusSuccess, fmt.Sprintf(msg, args...))
}

// errorf reports a failed action on resource r via the configured Log
// callback, if any.
func (o *ApplyOptions) errorf(r *unstructured.Unstructured, action apps.ResourceAction, msg string, args ...interface{}) {
	if o.Log == nil {
		return
	}
	o.Log(r, action, StatusFailure, fmt.Sprintf(msg, args...))
}
// Init installs the ResourceSet CRD into the cluster and waits for
// it to become available.
// It does not need to be called before each use of Synk.
func (s *Synk) Init() error {
	vTrue := true
	// Definition of the ResourceSet custom resource. The spec/status
	// schemas are left open (x-kubernetes-preserve-unknown-fields).
	crd := &apiextensions.CustomResourceDefinition{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apiextensions.k8s.io/v1",
			Kind:       "CustomResourceDefinition",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "resourcesets.apps.cloudrobotics.com",
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "apps.cloudrobotics.com",
			Names: apiextensions.CustomResourceDefinitionNames{
				Kind:     "ResourceSet",
				Plural:   "resourcesets",
				Singular: "resourceset",
			},
			Scope: apiextensions.ClusterScoped,
			Versions: []apiextensions.CustomResourceDefinitionVersion{{
				Name:    "v1alpha1",
				Served:  true,
				Storage: true,
				// TODO(ensonic): replace with the actual schema
				Schema: &apiextensions.CustomResourceValidation{
					OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
						Type: "object",
						Properties: map[string]apiextensions.JSONSchemaProps{
							"spec": {
								Type:                   "object",
								XPreserveUnknownFields: &vTrue,
							},
							"status": {
								Type:                   "object",
								XPreserveUnknownFields: &vTrue,
							},
						},
					},
				},
			}},
		},
	}
	var u unstructured.Unstructured
	if err := convert(crd, &u); err != nil {
		return err
	}
	// The CRD is applied like any other resource, without a ResourceSet owner.
	if _, err := s.applyOne(context.Background(), &u, nil); err != nil {
		return errors.Wrap(err, "create ResourceSet CRD")
	}
	// Poll discovery (60 tries at 2s, i.e. up to 2 minutes) until the new
	// CRD is visible.
	err := backoff.Retry(
		func() error {
			s.discovery.Invalidate()
			ok, err := s.crdAvailable(&u)
			if err != nil {
				return err
			}
			if !ok {
				return errors.New("crd not available")
			}
			return nil
		},
		backoff.WithMaxRetries(backoff.NewConstantBackOff(2*time.Second), 60),
	)
	if err != nil {
		return errors.Wrap(err, "wait for ResourceSet CRD")
	}
	return nil
}
// Delete removes the resources that are part of the ResourceSet specified
// by 'name'. It uses "foreground cascading deletion", which means that:
//
//   - it returns after marking the ResourceSet for deletion, but before the
//     resources have been deleted
//   - the ResourceSet will not be deleted until all resources have been
//     deleted
//
// This ensures that if a new ResourceSet is created before all resources
// have been deleted, it will have a higher version number.
func (s *Synk) Delete(ctx context.Context, name string) error {
	foreground := metav1.DeletePropagationForeground
	opts := metav1.DeleteOptions{PropagationPolicy: &foreground}
	// ResourceSet versions carry a "name" label with the bare set name.
	selector := fmt.Sprintf("name=%s", name)
	return s.client.Resource(resourceSetGVR).DeleteCollection(ctx, opts, metav1.ListOptions{
		LabelSelector: selector,
	})
}
// Apply installs or updates the ResourceSet specified by 'name'.
// It creates a new ResourceSet version, applies all given resources under
// it, records per-resource results in the ResourceSet status, and — on
// success — deletes older ResourceSet versions of the same name. Failed
// older ResourceSets are always cleaned up (best effort).
func (s *Synk) Apply(
	ctx context.Context,
	name string,
	opts *ApplyOptions,
	resources ...*unstructured.Unstructured,
) (*apps.ResourceSet, error) {
	if opts == nil {
		opts = &ApplyOptions{}
	}
	opts.name = name
	// applyAll() updates the resources in place. To avoid modifying the
	// caller's slice, copy the resources first.
	resources = append([]*unstructured.Unstructured(nil), resources...)
	for i, r := range resources {
		resources[i] = r.DeepCopy()
	}
	rs, resources, err := s.initialize(ctx, opts, resources...)
	if err != nil {
		return rs, err
	}
	results, applyErr := s.applyAll(ctx, rs, opts, resources...)
	defer func() {
		// We always want to clean up old failed ResourceSets. There is no reason
		// to keep multiple failed ones around. But a failure in the cleanup is not
		// critical. So we only log it.
		if err := s.deleteFailedResourceSets(ctx, opts.name, opts.version); err != nil {
			slog.Warn("Failed to remove failed ResourceSets", slog.Any("Name", opts.name), slog.Any("Error", err))
		}
	}()
	if err := s.updateResourceSetStatus(ctx, rs, results); err != nil {
		return rs, err
	}
	// Only drop older ResourceSet versions once the new one applied cleanly.
	if applyErr == nil {
		if err := s.deleteResourceSets(ctx, opts.name, opts.version); err != nil {
			return rs, err
		}
	}
	return rs, applyErr
}
// transientErr wraps an error to mark it as transient for IsTransientErr.
type transientErr struct {
	error
}

// IsTransientErr returns true if the error may resolve by retrying the operation.
func IsTransientErr(err error) bool {
	// Either a custom error is specifically wrapped in transientErr or the innermost
	// error is a known transient Kubernetes API error.
	_, ok1 := err.(transientErr)
	_, ok2 := err.(*transientErr)
	if ok1 || ok2 {
		return true
	}
	err = errors.Cause(err)
	switch {
	// May happen on resourceVersion mismatches or patch conflicts.
	case k8serrors.IsConflict(err):
	case k8serrors.IsResourceExpired(err):
	// May happen if a created object has already been created.
	case k8serrors.IsAlreadyExists(err):
	// May happen if a patched resource has already been deleted.
	case k8serrors.IsNotFound(err):
	case k8serrors.IsGone(err):
	// Server-side transient errors.
	case k8serrors.IsInternalError(err):
	case k8serrors.IsServerTimeout(err):
	case k8serrors.IsTimeout(err):
	case k8serrors.IsTooManyRequests(err):
	case k8serrors.IsServiceUnavailable(err):
	// May happen shortly after CRD creation.
	case discovery.IsGroupDiscoveryFailedError(err):
	// May happen if a chart is deleted and immediately recreated.
	// https://github.com/kubernetes/kubernetes/blob/d2a081c8e14e21e28fe5bdfa38a817ef9c0bb8e3/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go#L173
	case strings.Contains(err.Error(), "unable to create new content in namespace"):
	// Note: Golang switch cases don't fall through, so we only return
	// false in the default case. All matched cases above are transient.
	default:
		return false
	}
	return true
}
// applyAll applies CRDs first and waits for them to be discoverable, then
// repeatedly applies the remaining resources until the number of failures
// stops changing (bounded at 10 rounds). It returns per-resource results
// and an overall error that is transient iff every individual failure is
// transient.
func (s *Synk) applyAll(
	ctx context.Context,
	rs *apps.ResourceSet,
	opts *ApplyOptions,
	resources ...*unstructured.Unstructured,
) (applyResults, error) {
	results := applyResults{}
	crds, regulars := separateCRDsFromResources(resources)
	// Insert CRDs and wait for them to become available.
	for _, crd := range crds {
		// CRDs must never be replaced as deleting them will delete
		// all its current instances. Update conflicts must be resolved manually.
		action, err := s.applyOne(ctx, crd, rs)
		if err != nil {
			opts.errorf(crd, action, "failed to apply: %s", err)
		} else {
			opts.logf(crd, action, "applied successfully")
		}
		results.set(crd, action, err)
	}
	// Poll discovery (60 tries at 2s, up to 2 minutes) until all CRDs show up.
	err := backoff.Retry(
		func() error {
			s.discovery.Invalidate()
			for _, crd := range crds {
				if ok, err := s.crdAvailable(crd); err != nil {
					return backoff.Permanent(err)
				} else if !ok {
					return fmt.Errorf("crd not yet available: %q", crd.GetName())
				}
			}
			return nil
		},
		backoff.WithMaxRetries(backoff.NewConstantBackOff(2*time.Second), 60),
	)
	if err != nil {
		return results, errors.Wrap(err, "wait for CRDs")
	}
	// Reset all discovery and mapping once again.
	s.resetMapper()
	// Try applying until the errors stay the same between iterations. Put in
	// an upper bound just in case of flapping errors.
	prevFailures := 0
	for i := 0; i < 10; i++ {
		curFailures := 0
		for _, r := range regulars {
			// Don't retry resources that were applied successfully
			// in the first iteration.
			if i > 0 && !results.failed(r) {
				continue
			}
			// Attach the ResourceSet as owner. CRDs are exempt since
			// the risk of unintended deletion of all its instances is too high.
			setOwnerRef(r, rs)
			action, err := s.applyOne(ctx, r, rs)
			if err != nil {
				curFailures++
				opts.errorf(r, action, "failed to apply, may retry: %s", err)
			} else {
				opts.logf(r, action, "applied successfully")
			}
			results.set(r, action, err)
		}
		if curFailures == 0 || curFailures == prevFailures {
			break
		}
		prevFailures = curFailures
	}
	// The overall error we return is a transient error if all resource errors
	// are transient. If there's at least one permanent failure, retrying
	// will never make Apply overall successful.
	allTransient := true
	numErrors := 0
	var firstFailure *applyResult
	for _, r := range results {
		if r.err != nil {
			if !IsTransientErr(r.err) {
				allTransient = false
			}
			if firstFailure == nil {
				firstFailure = r
			}
			numErrors++
		}
	}
	if numErrors == 0 {
		return results, nil
	}
	// Summarize failures, quoting one failed resource for context.
	err = fmt.Errorf("%d/%d resources failed to apply", numErrors, len(results))
	if numErrors == 1 {
		err = fmt.Errorf("%s: %s: %s", err, resourceKey(firstFailure.resource), firstFailure.err)
	} else {
		err = fmt.Errorf("%s, including %s: %s", err, resourceKey(firstFailure.resource), firstFailure.err)
	}
	if allTransient {
		err = transientErr{err}
	}
	return results, err
}
// initialize a new ResourceSet version for the given name and prepare resources
// for it.
// It drops empty and test resources, sorts the remainder, fills in default
// namespaces, optionally enforces opts.Namespace, and creates the
// ResourceSet object (status Pending) that the apply run will report into.
func (s *Synk) initialize(
	ctx context.Context,
	opts *ApplyOptions,
	resources ...*unstructured.Unstructured,
) (*apps.ResourceSet, []*unstructured.Unstructured, error) {
	// Cleanup and sort resources.
	resources = filter(resources, func(r *unstructured.Unstructured) bool {
		return !reflect.DeepEqual(*r, unstructured.Unstructured{}) && !isTestResource(r)
	})
	sortResources(resources)
	crds, regulars := separateCRDsFromResources(resources)
	if err := s.populateNamespaces(ctx, opts.Namespace, crds, regulars...); err != nil {
		return nil, nil, errors.Wrap(err, "set default namespaces")
	}
	// TODO: consider putting this and other validation as a step after initialize
	// so we can give validation errors in batch in the ResourceSet status.
	if opts.EnforceNamespace {
		for _, r := range regulars {
			if ns := r.GetNamespace(); ns != "" && ns != opts.Namespace && ns != "kube-system" {
				return nil, nil, errors.Errorf("invalid namespace %q on %q, expected %q or \"kube-system\"", ns, resourceKey(r), opts.Namespace)
			}
		}
	}
	// Initialize and create next ResourceSet.
	var err error
	opts.version, err = s.next(ctx, opts.name)
	if err != nil {
		return nil, nil, errors.Wrap(err, "get next ResourceSet version")
	}
	var rs apps.ResourceSet
	rs.Name = resourceSetName(opts.name, opts.version)
	rs.Labels = map[string]string{"name": opts.name}
	// Record all resources in the spec, grouped by GroupVersionKind.
	groupedResources := map[schema.GroupVersionKind][]apps.ResourceRef{}
	for _, r := range resources {
		gvk := r.GroupVersionKind()
		groupedResources[gvk] = append(groupedResources[gvk], apps.ResourceRef{
			Namespace: r.GetNamespace(),
			Name:      r.GetName(),
		})
	}
	for gvk, res := range groupedResources {
		rs.Spec.Resources = append(rs.Spec.Resources, apps.ResourceSetSpecGroup{
			Group:   gvk.Group,
			Version: gvk.Version,
			Kind:    gvk.Kind,
			Items:   res,
		})
	}
	// Map iteration order is random; sort the groups deterministically.
	sort.Slice(rs.Spec.Resources, func(i, j int) bool {
		return lessResourceSetSpecGroup(&rs.Spec.Resources[i], &rs.Spec.Resources[j])
	})
	rs.Status = apps.ResourceSetStatus{
		Phase:     apps.ResourceSetPhasePending,
		StartedAt: metav1.Now(),
	}
	if err := s.createResourceSet(ctx, &rs); err != nil {
		return nil, nil, errors.Wrapf(err, "create resources object %q", rs.Name)
	}
	return &rs, resources, nil
}
// Set default namespace on all namespaced resources.
// Whether a resource is namespaced is determined from apiserver discovery
// data plus the CRDs that are part of this apply run (which may not be
// discoverable yet). Kinds that appear in neither are assumed
// cluster-scoped, with a warning.
func (s *Synk) populateNamespaces(
	ctx context.Context,
	ns string,
	crds []*unstructured.Unstructured,
	resources ...*unstructured.Unstructured,
) error {
	_, span := trace.StartSpan(ctx, "Discover server resources")
	// Invalidate is cheap (no noticeable effect on the duration of
	// ServerGroupsAndResources) and reduces the frequency of the "stale
	// GroupVersion discovery" warning.
	s.discovery.Invalidate()
	_, list, err := s.discovery.ServerGroupsAndResources()
	span.End()
	if err != nil {
		if len(list) == 0 {
			// This error is only fatal if it actually fails to discover resources.
			// Otherwise it indicates that the apiserver has cached some bad
			// information about an aggregated API extension.
			return errors.Wrap(err, "discover server resources")
		}
		slog.Warn("Ignoring error from ServerGroupsAndResources", ilog.Err(err))
	}
	// We have to consider discoverable resources as well as CRDs that
	// will only be added later.
	isNamespaced := map[string]bool{}
	for _, srvRes := range list {
		for _, sr := range srvRes.APIResources {
			isNamespaced[srvRes.GroupVersion+"/"+sr.Kind] = sr.Namespaced
		}
	}
	for _, crd := range crds {
		var typed apiextensions.CustomResourceDefinition
		if err := convert(crd, &typed); err != nil {
			return errors.Wrapf(err, "invalid CustomResourceDefinition %q", resourceKey(crd))
		}
		for _, v := range typed.Spec.Versions {
			k := typed.Spec.Group + "/" + v.Name + "/" + typed.Spec.Names.Kind
			isNamespaced[k] = typed.Spec.Scope != apiextensions.ClusterScoped
		}
	}
	for _, r := range resources {
		// Resources with an explicit namespace are left untouched.
		if r.GetNamespace() != "" {
			continue
		}
		gvk := r.GetAPIVersion() + "/" + r.GetKind()
		rIsNamespaced, ok := isNamespaced[gvk]
		if !ok {
			slog.Warn("Neither apiserver nor chart CRDs indicate if resource is Namespaced or Cluster-scoped, assuming Cluster-scoped",
				slog.String("Namespace", ns), slog.String("Kind", gvk))
		}
		if rIsNamespaced {
			r.SetNamespace(ns)
		}
	}
	return nil
}
// deleteAppliedAnnotation removes the last-applied-configuration annotation
// from u, leaving other annotations untouched.
func deleteAppliedAnnotation(u *unstructured.Unstructured) {
	anns := u.GetAnnotations()
	if anns != nil {
		// Delete any potential pre-existing annotation.
		delete(anns, corev1.LastAppliedConfigAnnotation)
		u.SetAnnotations(anns)
		return
	}
	u.SetAnnotations(map[string]string{})
}
// setAppliedAnnotation stores the JSON serialization of u (stripped of any
// previous applied-config annotation) in the kubectl
// last-applied-configuration annotation. If the serialization exceeds the
// annotation size limit, an error is returned and the annotation stays unset.
func setAppliedAnnotation(u *unstructured.Unstructured) error {
	deleteAppliedAnnotation(u)
	serialized, err := u.MarshalJSON()
	if err != nil {
		return err
	}
	if size := len(serialized); size >= totalAnnotationSizeLimitB {
		return errors.Errorf("skipping annotation %q for %q: size %d > max. allowed size %d",
			corev1.LastAppliedConfigAnnotation, u.GetName(), size, totalAnnotationSizeLimitB)
	}
	annotations := u.GetAnnotations()
	annotations[corev1.LastAppliedConfigAnnotation] = string(serialized)
	u.SetAnnotations(annotations)
	return nil
}
// getAppliedAnnotation returns the stored last-applied configuration of u.
// A missing annotation yields an empty slice.
func getAppliedAnnotation(u *unstructured.Unstructured) []byte {
	annotations := u.GetAnnotations()
	return []byte(annotations[corev1.LastAppliedConfigAnnotation])
}
// validateOwnerRefs returns an error if the resource has ResourceSet owners
// that are not predecessors of name/version.
func validateOwnerRefs(r *unstructured.Unstructured, set *apps.ResourceSet) error {
	if set == nil {
		return nil
	}
	setName, setVersion, ok := decodeResourceSetName(set.Name)
	if !ok {
		return errors.Errorf("invalid ResourceSet name %q", set.Name)
	}
	for _, ref := range r.GetOwnerReferences() {
		// Only ResourceSet owners are of interest here.
		if ref.Kind != "ResourceSet" || ref.APIVersion != "apps.cloudrobotics.com/v1alpha1" {
			continue
		}
		refName, refVersion, ok := decodeResourceSetName(ref.Name)
		switch {
		case !ok:
			return errors.Errorf("ResourceSet owner reference with invalid name %q", ref.Name)
		case refName != setName:
			return errors.Errorf("owned by conflicting ResourceSet object %q", ref.Name)
		case refVersion > setVersion:
			// TODO(rodrigoq): should this be transient to cope with concurrent synk runs?
			return errors.Errorf("owned by newer ResourceSet %q > v%d", ref.Name, setVersion)
		}
	}
	return nil
}
// setOwnerRef makes the given ResourceSet the sole ResourceSet owner of r,
// removing all other ResourceSet owner references while keeping owner
// references of any other kind intact.
func setOwnerRef(r *unstructured.Unstructured, set *apps.ResourceSet) {
	var refs []metav1.OwnerReference
	for _, ref := range r.GetOwnerReferences() {
		// Drop previous ResourceSet owners; everything else is preserved.
		if ref.APIVersion == "apps.cloudrobotics.com/v1alpha1" && ref.Kind == "ResourceSet" {
			continue
		}
		refs = append(refs, ref)
	}
	block := true
	refs = append(refs, metav1.OwnerReference{
		APIVersion:         "apps.cloudrobotics.com/v1alpha1",
		Kind:               "ResourceSet",
		Name:               set.Name,
		UID:                set.UID,
		BlockOwnerDeletion: &block,
	})
	r.SetOwnerReferences(refs)
}
// canReplace determines whether an "apply patch/update" error is likely to be
// resolved by deleting and recreating the resource. Some resources have
// immutable fields (eg Job.spec.template) that can only be changed this way.
// This is analogous to `kubectl apply --force`.
func canReplace(resource *unstructured.Unstructured, patchErr error) bool {
	msg := patchErr.Error()
	contains := func(s string) bool { return strings.Contains(msg, s) }
	switch resource.GetKind() {
	case "DaemonSet", "Deployment", "Job":
		if contains("field is immutable") {
			return true
		}
	case "Service":
		if contains("field is immutable") || contains("may not change once set") || contains("can not be unset") {
			return true
		}
	case "PersistentVolume":
		if contains("is immutable after creation") {
			// Only replace volumes whose data survives deletion.
			v, ok, err := unstructured.NestedString(resource.Object, "spec", "persistentVolumeReclaimPolicy")
			if err == nil && ok && v == "Retain" {
				return true
			}
			slog.Info("Not replacing PersistentVolume since reclaim policy is not Retain", slog.String("Policy", v))
		}
	case "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration":
		if contains("must be specified for an update") {
			return true
		}
	}
	// TODO(rodrigoq): can other resources be safely replaced?
	return false
}
// replace deletes the live resource and recreates it from the given manifest.
// It is the fallback path for resources whose immutable fields changed.
func replace(ctx context.Context, client dynamic.ResourceInterface, resource *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	// Foreground deletion means that the new job can't be created until the old
	// pods are gone, so updates to a currently-running job are safer.
	foreground := metav1.DeletePropagationForeground
	opts := metav1.DeleteOptions{PropagationPolicy: &foreground}
	if err := client.Delete(ctx, resource.GetName(), opts); err != nil {
		return nil, errors.Wrap(err, "delete")
	}
	created, err := client.Create(ctx, resource, metav1.CreateOptions{})
	if err != nil {
		// This is likely to occur if deletion is not immediate, in which case
		// this returns a transient AlreadyExists error, and the outer loop will
		// retry until the resource is deleted.
		return nil, errors.Wrap(err, "create")
	}
	return created, nil
}
// applyOne creates or updates a single resource, mimicking the three-way
// merge semantics of `kubectl apply`. It returns the action that was
// attempted (Create/Update/Replace/None) together with any error. On
// success, resource is overwritten in place with the apiserver's response.
func (s *Synk) applyOne(ctx context.Context, resource *unstructured.Unstructured, set *apps.ResourceSet) (apps.ResourceAction, error) {
	// If name is unset, we'd retrieve a list below and panic.
	// TODO: This may be valid if generateName is set instead. In this case we
	// want to create the resource in any case.
	if resource.GetName() == "" {
		return apps.ResourceActionNone, errors.New("missing resource name")
	}
	ctx, span := trace.StartSpan(ctx, "Apply "+resource.GetName())
	defer span.End()
	// GroupVersionKind is not sufficient to determine the REST API path to use
	// for the resource. We need to get this information from the RESTMapper,
	// which uses the discovery API to determine the right GroupVersionResource.
	gvk := resource.GroupVersionKind()
	mapping, err := s.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return apps.ResourceActionNone, errors.Wrap(err, "get REST mapping")
	}
	// Cluster-scoped resources use the root client; namespaced ones are
	// addressed within their namespace.
	var client dynamic.ResourceInterface
	if mapping.Scope.Name() == meta.RESTScopeNameRoot {
		client = s.client.Resource(mapping.Resource)
	} else {
		client = s.client.Resource(mapping.Resource).Namespace(resource.GetNamespace())
	}
	// Try to store the manifest in the last-applied annotation (as kubectl
	// does). If it is too large, remember to strip any stale annotation from
	// the live object below.
	resetAppliedAnnotation := false
	if err := setAppliedAnnotation(resource); err != nil {
		slog.Warn("Storing Applied Annotation failed", ilog.Err(err))
		resetAppliedAnnotation = true
	}
	// Create the resource if it doesn't exist yet.
	_, getSpan := trace.StartSpan(ctx, "Get "+resource.GetName())
	current, err := client.Get(ctx, resource.GetName(), metav1.GetOptions{})
	getSpan.End()
	if k8serrors.IsNotFound(err) {
		_, createSpan := trace.StartSpan(ctx, "Create "+resource.GetName())
		res, err := client.Create(ctx, resource, metav1.CreateOptions{})
		createSpan.End()
		if err != nil {
			return apps.ResourceActionCreate, errors.Wrap(err, "create resource")
		}
		*resource = *res
		return apps.ResourceActionCreate, nil
	} else if err != nil {
		return apps.ResourceActionNone, errors.Wrap(err, "get resource")
	}
	// Refuse to touch resources owned by a conflicting or newer ResourceSet.
	if err := validateOwnerRefs(current, set); err != nil {
		return apps.ResourceActionNone, errors.Wrap(err, "owner conflict")
	}
	// Get what is running, what was installed and what we want to run.
	currentRaw, err := current.MarshalJSON()
	if err != nil {
		return apps.ResourceActionNone, err
	}
	resourceRaw, err := resource.MarshalJSON()
	if err != nil {
		return apps.ResourceActionNone, err
	}
	if resetAppliedAnnotation {
		deleteAppliedAnnotation(current)
	}
	originalRaw := getAppliedAnnotation(current)
	var patchErr error
	if len(originalRaw) > 0 {
		// Try to patch it.
		var (
			patchType types.PatchType
			patch     []byte
		)
		// Kinds registered in the scheme support a strategic merge patch;
		// everything else (e.g. custom resources) gets a JSON merge patch.
		obj, err := scheme.Scheme.New(mapping.GroupVersionKind)
		if err == nil {
			// TODO: add option to dynamically load patch meta from discovery API
			// for full kubectl compatibility.
			patchMeta, err := strategicpatch.NewPatchMetaFromStruct(obj)
			if err != nil {
				return apps.ResourceActionNone, errors.Wrap(err, "lookup patch meta")
			}
			// TODO: Make overwrite boolean configurable for full kubectl compatibility.
			patch, err = strategicpatch.CreateThreeWayMergePatch(
				originalRaw, resourceRaw, currentRaw,
				patchMeta, true,
			)
			if err != nil {
				return apps.ResourceActionNone, errors.Wrap(err, "create strategic-merge-patch")
			}
			patchType = types.StrategicMergePatchType
		} else if runtime.IsNotRegisteredError(err) {
			patch, err = jsonmergepatch.CreateThreeWayJSONMergePatch(
				originalRaw, resourceRaw, currentRaw,
				mergepatch.RequireKeyUnchanged("apiVersion"),
				mergepatch.RequireKeyUnchanged("kind"),
				mergepatch.RequireMetadataKeyUnchanged("name"),
			)
			if err != nil {
				return apps.ResourceActionNone, errors.Wrap(err, "create json-merge-patch")
			}
			patchType = types.MergePatchType
		} else {
			return apps.ResourceActionNone, errors.Wrap(err, "instantiate object")
		}
		// As the ownerReference is bumped, the patch will never be empty
		// and we cannot skip it.
		// CL https://github.com/kubernetes/kubernetes/pull/71156
		// added an option to fix a patch against a specific resourceVersion.
		// However, it isn't used anywhere in kubectl apply itself. Thus we don't do it here either.
		// Additionally the CL doesn't seem to implement valid behavior as the patch
		// retries will not update to a new resourceVersion and the failure would persist.
		_, patchSpan := trace.StartSpan(ctx, "Patch "+resource.GetName())
		res, err := client.Patch(ctx, resource.GetName(), patchType, patch, metav1.PatchOptions{})
		patchSpan.End()
		if err == nil {
			// Successfully patched.
			*resource = *res
			return apps.ResourceActionUpdate, nil
		}
		patchErr = err
	} else {
		// We don't have lastApplied state, hence try a direct Update without a
		// 3-way-merge. This can happen if the resource is too large for the
		// annotation or has been deleted.
		resource.SetResourceVersion(current.GetResourceVersion())
		_, updateSpan := trace.StartSpan(ctx, "Update "+resource.GetName())
		res, err := client.Update(ctx, resource, metav1.UpdateOptions{})
		updateSpan.End()
		if err == nil {
			// Successfully updated.
			*resource = *res
			return apps.ResourceActionUpdate, nil
		}
		patchErr = err
	}
	// If patching/updating failed, consider deleting and recreating the resource.
	if !canReplace(resource, patchErr) {
		return apps.ResourceActionUpdate, errors.Wrap(patchErr, "apply patch or update")
	}
	_, replace_span := trace.StartSpan(ctx, "Replace "+resource.GetName())
	res, err := replace(ctx, client, resource)
	replace_span.End()
	if err != nil {
		return apps.ResourceActionReplace, errors.Wrap(err, "replace")
	}
	*resource = *res
	return apps.ResourceActionReplace, nil
}
// crdAvailable checks if all versions of the given CRD are present in the
// server's discovery information. Callers must use s.Discovery.Invalidate()
// to clear the discovery cache before calling this method to check against the
// latest server state.
func (s *Synk) crdAvailable(ucrd *unstructured.Unstructured) (bool, error) {
	var crd apiextensions.CustomResourceDefinition
	if err := convert(ucrd, &crd); err != nil {
		return false, err
	}
	for _, v := range crd.Spec.Versions {
		// Only served versions are expected in discovery.
		if !v.Served {
			continue
		}
		list, err := s.discovery.ServerResourcesForGroupVersion(crd.Spec.Group + "/" + v.Name)
		if err != nil {
			// We'd like to detect "not found" vs network errors here. But unfortunately
			// there's no canonical error being used.
			slog.Warn("ServerResourcesForGroupVersion failed",
				slog.String("Group", crd.Spec.Group),
				slog.String("Version", v.Name),
				ilog.Err(err))
			return false, nil
		}
		available := false
		for _, res := range list.APIResources {
			if res.Name == crd.Spec.Names.Plural {
				available = true
				break
			}
		}
		if !available {
			return false, nil
		}
	}
	return true, nil
}
// resourceSetGVR addresses the ResourceSet custom resource through the
// dynamic client.
var resourceSetGVR = schema.GroupVersionResource{
	Group:    "apps.cloudrobotics.com",
	Version:  "v1alpha1",
	Resource: "resourcesets",
}
// createResourceSet creates rs on the cluster and updates rs in place with
// the object returned by the apiserver.
func (s *Synk) createResourceSet(ctx context.Context, rs *apps.ResourceSet) error {
	rs.APIVersion = "apps.cloudrobotics.com/v1alpha1"
	rs.Kind = "ResourceSet"
	var u unstructured.Unstructured
	if err := convert(rs, &u); err != nil {
		return err
	}
	created, err := s.client.Resource(resourceSetGVR).Create(ctx, &u, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	// Reflect server-side fields (UID, resourceVersion, ...) back into rs.
	return convert(created, rs)
}
// applyResult captures the outcome of applying a single resource.
type applyResult struct {
	// resource is the object that was sent to (or returned by) the apiserver.
	resource *unstructured.Unstructured
	// err is nil if the apply succeeded.
	err error
	// action is the ResourceAction that was attempted.
	action apps.ResourceAction
}

// String renders the result for logging.
func (r *applyResult) String() string {
	return fmt.Sprintf("%s action=%s error=%s", resourceKey(r.resource), r.action, r.err)
}
// applyResults indexes apply outcomes by their resource key.
type applyResults map[string]*applyResult

// set records the outcome of applying res.
func (r applyResults) set(res *unstructured.Unstructured, action apps.ResourceAction, err error) {
	r[resourceKey(res)] = &applyResult{resource: res, action: action, err: err}
}

// failed reports whether a result for res exists and ended in an error.
func (r applyResults) failed(res *unstructured.Unstructured) bool {
	result, ok := r[resourceKey(res)]
	return ok && result.err != nil
}

// list returns all results in a deterministic order.
func (r applyResults) list() (l []*applyResult) {
	for _, result := range r {
		l = append(l, result)
	}
	sort.Slice(l, func(i, j int) bool {
		return lessUnstructured(l[i].resource, l[j].resource)
	})
	return l
}
// updateResourceSetStatus folds the apply results into the status of rs and
// writes it to the cluster. The phase becomes Failed if any resource failed
// and Settled otherwise. rs is updated in place with the server's response.
func (s *Synk) updateResourceSetStatus(ctx context.Context, rs *apps.ResourceSet, results applyResults) error {
	// Bucket results per GroupVersionKind, split into succeeded and failed.
	type group map[schema.GroupVersionKind][]apps.ResourceStatus
	applied, failed := group{}, group{}
	for _, r := range results.list() {
		st := apps.ResourceStatus{
			Namespace:  r.resource.GetNamespace(),
			Name:       r.resource.GetName(),
			Action:     r.action,
			UID:        string(r.resource.GetUID()),
			Generation: r.resource.GetGeneration(),
		}
		if r.err != nil {
			st.Error = r.err.Error()
		}
		gvk := r.resource.GroupVersionKind()
		if r.err != nil {
			failed[gvk] = append(failed[gvk], st)
		} else {
			applied[gvk] = append(applied[gvk], st)
		}
	}
	// Attach group map as sorted status list.
	build := func(g group, list *[]apps.ResourceSetStatusGroup) {
		for gvk, res := range g {
			*list = append(*list, apps.ResourceSetStatusGroup{
				Group:   gvk.Group,
				Version: gvk.Version,
				Kind:    gvk.Kind,
				Items:   res,
			})
		}
		sort.Slice(*list, func(i, j int) bool {
			return lessResourceSetStatusGroup(&(*list)[i], &(*list)[j])
		})
	}
	build(applied, &rs.Status.Applied)
	build(failed, &rs.Status.Failed)
	rs.Status.FinishedAt = metav1.Now()
	if len(rs.Status.Failed) > 0 {
		rs.Status.Phase = apps.ResourceSetPhaseFailed
	} else {
		rs.Status.Phase = apps.ResourceSetPhaseSettled
	}
	// Round-trip through Unstructured for the dynamic client.
	var u unstructured.Unstructured
	if err := convert(rs, &u); err != nil {
		return err
	}
	res, err := s.client.Resource(resourceSetGVR).Update(ctx, &u, metav1.UpdateOptions{})
	if err != nil {
		return errors.Wrap(err, "update ResourceSet status")
	}
	return convert(res, rs)
}
// deleteFailedResourceSets deletes all failed ResourceSets of the given name
// that have a lower version.
func (s *Synk) deleteFailedResourceSets(ctx context.Context, name string, version int32) error {
	c := s.client.Resource(resourceSetGVR)
	list, err := c.List(ctx, metav1.ListOptions{LabelSelector: "name=" + name})
	if err != nil {
		return errors.Wrap(err, "list existing resources")
	}
	for _, item := range list.Items {
		phase, found, err := unstructured.NestedString(item.Object, "status", "phase")
		switch {
		case err != nil:
			return errors.Wrapf(err, "failed to get status.phase from ResourceSet %q", item.GetName())
		case !found || phase != "Failed":
			// Only failed sets are garbage-collected here.
			continue
		}
		n, v, ok := decodeResourceSetName(item.GetName())
		if !ok || n != name || v >= version {
			continue
		}
		// TODO: should we possibly opt for foreground deletion here so
		// we only return after all dependents have been deleted as well?
		// kubectl doesn't allow to opt into foreground deletion in general but
		// here it would likely bring us closer to the apply --prune semantics.
		if err := c.Delete(ctx, item.GetName(), metav1.DeleteOptions{}); err != nil {
			return errors.Wrapf(err, "delete ResourceSet %q", item.GetName())
		}
	}
	return nil
}
// deleteResourceSets deletes all ResourceSets of the given name that have a
// lower version.
func (s *Synk) deleteResourceSets(ctx context.Context, name string, version int32) error {
	c := s.client.Resource(resourceSetGVR)
	list, err := c.List(ctx, metav1.ListOptions{LabelSelector: "name=" + name})
	if err != nil {
		return errors.Wrap(err, "list existing resources")
	}
	for _, item := range list.Items {
		n, v, ok := decodeResourceSetName(item.GetName())
		if !ok || n != name || v >= version {
			// Keep sets with unparseable names, other base names, or
			// versions at or above the current one.
			continue
		}
		// TODO: should we possibly opt for foreground deletion here so
		// we only return after all dependents have been deleted as well?
		// kubectl doesn't allow to opt into foreground deletion in general but
		// here it would likely bring us closer to the apply --prune semantics.
		if err := c.Delete(ctx, item.GetName(), metav1.DeleteOptions{}); err != nil {
			return errors.Wrapf(err, "delete ResourceSet %q", item.GetName())
		}
	}
	return nil
}
// next returns the next version for the resources name, i.e. one greater
// than the highest version of any existing ResourceSet with that base name.
func (s *Synk) next(ctx context.Context, name string) (version int32, err error) {
	list, err := s.client.Resource(resourceSetGVR).List(ctx, metav1.ListOptions{})
	if err != nil {
		return 0, errors.Wrap(err, "list existing ResourceSets")
	}
	var highest int32
	for _, item := range list.Items {
		if n, v, ok := decodeResourceSetName(item.GetName()); ok && n == name && v > highest {
			highest = v
		}
	}
	return highest + 1, nil
}
// isTestResource filters for helm-hooks that mark test resources. See
// https://github.com/googlecloudrobotics/core/issues/20
func isTestResource(r *unstructured.Unstructured) bool {
	if hook, ok := r.GetAnnotations()["helm.sh/hook"]; ok {
		return strings.HasPrefix(hook, "test-")
	}
	return false
}
// isCustomResourceDefinition reports whether r is a CustomResourceDefinition
// from any apiextensions.k8s.io API version.
func isCustomResourceDefinition(r *unstructured.Unstructured) bool {
	if r.GetKind() != "CustomResourceDefinition" {
		return false
	}
	return strings.HasPrefix(r.GetAPIVersion(), "apiextensions.k8s.io/")
}
// separateCRDsFromResources splits the input into CustomResourceDefinitions
// and all other resources, preserving order within each group.
func separateCRDsFromResources(resources []*unstructured.Unstructured) (crds []*unstructured.Unstructured, regulars []*unstructured.Unstructured) {
	for _, r := range resources {
		switch {
		case isCustomResourceDefinition(r):
			crds = append(crds, r)
		default:
			regulars = append(regulars, r)
		}
	}
	return crds, regulars
}
// filter returns the elements of in for which f is true, preserving order.
func filter(in []*unstructured.Unstructured, f func(*unstructured.Unstructured) bool) (out []*unstructured.Unstructured) {
	for _, r := range in {
		if !f(r) {
			continue
		}
		out = append(out, r)
	}
	return out
}
// resourceSetName builds the versioned ResourceSet name "<name>.v<version>".
func resourceSetName(s string, v int32) string {
	return fmt.Sprint(s, ".v", v)
}
// namePat matches versioned ResourceSet names of the form "<name>.v<version>".
var namePat = regexp.MustCompile(`^(.+)\.v([0-9]+)$`)

// decodeResourceSetName splits a name produced by resourceSetName into its
// base name and version. It returns false if s does not have the
// "<name>.v<version>" shape or if the version does not fit into an int32.
func decodeResourceSetName(s string) (string, int32, bool) {
	res := namePat.FindStringSubmatch(s)
	if len(res) == 0 {
		return "", 0, false
	}
	// Parse with an explicit 32-bit limit. The previous strconv.Atoi call
	// panicked on digit runs that overflow int and silently truncated values
	// between MaxInt32 and MaxInt64 through the int32 conversion; such names
	// are treated as invalid instead.
	version, err := strconv.ParseInt(res[2], 10, 32)
	if err != nil {
		return "", 0, false
	}
	return res[1], int32(version), true
}
// sortResources orders res in place using the canonical resource ordering.
func sortResources(res []*unstructured.Unstructured) {
	less := func(i, j int) bool { return lessUnstructured(res[i], res[j]) }
	sort.Slice(res, less)
}
// resourceKey returns a "group/version/kind/namespace/name" string that
// uniquely identifies the resource, suitable as a map key.
func resourceKey(r *unstructured.Unstructured) string {
	gvk := r.GroupVersionKind()
	prefix := gvkKey(gvk.Group, gvk.Version, gvk.Kind)
	return fmt.Sprintf("%s/%s/%s", prefix, r.GetNamespace(), r.GetName())
}
// gvkKey joins group, version, and kind into a "group/version/kind" string.
func gvkKey(group, version, kind string) string {
	return group + "/" + version + "/" + kind
}
// convert a resource from one type representation to another one by
// round-tripping it through its JSON serialization.
func convert(from, to runtime.Object) error {
	raw, err := json.Marshal(from)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, &to)
}
================================================
FILE: src/go/pkg/synk/synk_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package synk
import (
"context"
"fmt"
"reflect"
"strings"
"testing"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
k8stest "k8s.io/client-go/testing"
"sigs.k8s.io/yaml"
)
// The fake discovery client has quite a bit of code but still returns empty results
// even when pointed at the Fake object of the dynamic client.
// Thus we implement our own static one.
type fakeCachedDiscoveryClient struct {
	discovery.CachedDiscoveryInterface
}

// Invalidate is a no-op: the static results below never go stale.
func (d *fakeCachedDiscoveryClient) Invalidate() {}

// ServerGroupsAndResources returns a fixed discovery result containing the
// namespaced Pod kind and the cluster-scoped Namespace kind.
func (d *fakeCachedDiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	return nil, []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Kind: "Pod", Namespaced: true},
				{Kind: "Namespace", Namespaced: false},
			},
		},
	}, nil
}
// fixture bundles a testing.T with the fake client's state and the write
// actions a test expects to observe.
type fixture struct {
	*testing.T

	// fake records all actions issued against the dynamic client.
	fake *k8stest.Fake

	// Starting state the respective client will report.
	objects []runtime.Object
	// Actions we want to see called against the respective client.
	actions []k8stest.Action
}

// newFixture returns an empty fixture for t.
func newFixture(t *testing.T) *fixture {
	return &fixture{T: t}
}
// newSynk builds a Synk backed by a fake dynamic client seeded with
// f.objects, the static discovery stub above, and a static REST mapper.
func (f *fixture) newSynk() *Synk {
	sc := runtime.NewScheme()
	scheme.AddToScheme(sc)
	apps.AddToScheme(sc) // For tests with CRDs.
	var (
		client = dynamicfake.NewSimpleDynamicClient(sc, f.objects...)
		s      = New(client, &fakeCachedDiscoveryClient{})
	)
	s.mapper = testrestmapper.TestOnlyStaticRESTMapper(sc)
	// The static mapper never goes stale, so resetting it is a no-op.
	s.resetMapper = func() {}
	f.fake = &client.Fake
	return s
}
// addObjects seeds the fake client's initial object state.
func (f *fixture) addObjects(objs ...runtime.Object) {
	for _, o := range objs {
		f.objects = append(f.objects, o)
	}
}
// expectActions records write actions the test expects to be issued.
func (f *fixture) expectActions(as ...k8stest.Action) {
	f.actions = append(f.actions, as...)
}
// verifyWriteActions compares the write actions recorded by the fake client
// against the expected ones and logs both sequences on mismatch.
func (f *fixture) verifyWriteActions() {
	writes := filterReadActions(f.fake.Actions())
	if !reflect.DeepEqual(writes, f.actions) {
		f.Errorf("writes did not match")
		f.Logf("received:")
		for i, a := range writes {
			f.Logf("%d: %s", i, sprintAction(a))
		}
		f.Logf("expected:")
		for i, a := range f.actions {
			f.Logf("%d: %s", i, sprintAction(a))
		}
	}
}
// TestSynk_IsTransientErr checks the classification of errors as transient
// (worth retrying) or permanent.
func TestSynk_IsTransientErr(t *testing.T) {
	tests := []struct {
		err  error
		want bool
	}{
		{
			errors.New("generic error"),
			false,
		},
		{
			transientErr{errors.New("transientErr struct")},
			true,
		},
		{
			&transientErr{errors.New("transientErr pointer")},
			true,
		},
		{
			k8serrors.NewUnauthorized("unauthorized"),
			false,
		},
		{
			k8serrors.NewResourceExpired("gone"),
			true,
		},
		{
			// A forbidden error caused by a terminating namespace resolves
			// itself and should be retried.
			k8serrors.NewForbidden(schema.GroupResource{
				Group:    "apps",
				Resource: "deployments",
			}, "my-deployment", errors.New("unable to create new content in namespace app-test-chart because it is being terminated")),
			true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.err.Error(), func(t *testing.T) {
			if got := IsTransientErr(tc.err); got != tc.want {
				t.Errorf("IsTransientErr(%v)=%v, want %v", tc.err, got, tc.want)
			}
		})
	}
}
// TODO(rodrigoq): test Apply() directly rather than the private methods
// TestSynk_initialize verifies that initialize() creates a "<name>.v1"
// ResourceSet whose spec lists the given resources grouped by GVK and
// sorted, with status phase Pending.
func TestSynk_initialize(t *testing.T) {
	ctx := context.Background()
	s := newFixture(t).newSynk()

	_, _, err := s.initialize(ctx, &ApplyOptions{name: "test"},
		newUnstructured("v1", "Pod", "ns2", "pod1"),
		newUnstructured("apps/v1", "Deployment", "ns1", "deploy1"),
		newUnstructured("v1", "Pod", "ns1", "pod1"),
	)
	if err != nil {
		t.Fatal(err)
	}
	got, err := s.client.Resource(resourceSetGVR).Get(ctx, "test.v1", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	var want unstructured.Unstructured
	unmarshalYAML(t, &want, `
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: ResourceSet
metadata:
  labels:
    name: test
  name: test.v1
spec:
  resources:
  - version: v1
    kind: Pod
    items:
    - name: pod1
      namespace: ns1
    - name: pod1
      namespace: ns2
  - group: apps
    version: v1
    kind: Deployment
    items:
    - namespace: ns1
      name: deploy1
status:
  phase: Pending
`)
	if want.GetName() != got.GetName() {
		t.Errorf("expected name %q but got %q", want.GetName(), got.GetName())
	}
	wantPhase, _, _ := unstructured.NestedString(want.Object, "status", "phase")
	gotPhase, _, _ := unstructured.NestedString(got.Object, "status", "phase")
	if wantPhase != gotPhase {
		t.Errorf("expected status phase %q but got %q", wantPhase, gotPhase)
	}
	if !reflect.DeepEqual(want.Object["spec"], got.Object["spec"]) {
		t.Errorf("expected spec\n%v\nbut got\n%v", want.Object["spec"], got.Object["spec"])
	}
}
// TestSynk_updateResourceSetStatus verifies that apply results are grouped
// into sorted applied/failed status lists and that the overall phase becomes
// Failed when at least one resource failed.
func TestSynk_updateResourceSetStatus(t *testing.T) {
	ctx := context.Background()
	f := newFixture(t)
	s := f.newSynk()

	rs := &apps.ResourceSet{
		ObjectMeta: metav1.ObjectMeta{Name: "set1"},
	}
	if err := s.createResourceSet(ctx, rs); err != nil {
		t.Fatal(err)
	}
	// One failed pod plus three successful resources across two GVKs.
	results := applyResults{
		"/v1/Pod/ns1/pod1": &applyResult{
			resource: newUnstructured("v1", "Pod", "ns1", "pod1"),
			action:   apps.ResourceActionCreate,
			err:      errors.New("oops"),
		},
		"/v1/Pod/ns1/pod2": &applyResult{
			resource: newUnstructured("v1", "Pod", "ns1", "pod2"),
			action:   apps.ResourceActionCreate,
		},
		"/v1/Pod/ns2/pod1": &applyResult{
			resource: newUnstructured("v1", "Pod", "ns2", "pod1"),
			action:   apps.ResourceActionUpdate,
		},
		"apps/v1/Deployment/ns1/deploy1": &applyResult{
			resource: newUnstructured("apps/v1", "Deployment", "ns1", "deploy1"),
			action:   apps.ResourceActionCreate,
		},
	}
	err := s.updateResourceSetStatus(ctx, rs, results)
	if err != nil {
		t.Fatal(err)
	}
	got, err := s.client.Resource(resourceSetGVR).Get(ctx, "set1", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	var want unstructured.Unstructured
	unmarshalYAML(t, &want, `
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: ResourceSet
metadata:
  name: set1
status:
  phase: Failed
  applied:
  failed:
  - version: v1
    kind: Pod
    items:
    - name: pod1
      namespace: ns1
      error: oops
      action: Create
  applied:
  - version: v1
    kind: Pod
    items:
    - name: pod2
      namespace: ns1
      action: Create
    - name: pod1
      namespace: ns2
      action: Update
  - group: apps
    version: v1
    kind: Deployment
    items:
    - namespace: ns1
      name: deploy1
      action: Create
`)
	if v, _, _ := unstructured.NestedString(got.Object, "status", "finishedAt"); v == "" {
		t.Errorf("finishedAt timestamp was not set")
	}
	// Remove unknown timestamps before running DeepEqual.
	unstructured.RemoveNestedField(got.Object, "status", "startedAt")
	unstructured.RemoveNestedField(got.Object, "status", "finishedAt")
	if !reflect.DeepEqual(got.Object["status"], want.Object["status"]) {
		t.Errorf("expected status:\n%q\nbut got:\n%q", want.Object["status"], got.Object["status"])
	}
}
// gvrs hardcodes some GVR mappings for easy use in tests. The only other way
// to obtain them is setting up a full RestMapper.
var gvrs = map[string]schema.GroupVersionResource{
	"configmaps":  {Version: "v1", Resource: "configmaps"},
	"deployments": {Group: "apps", Version: "v1", Resource: "deployments"},
	"approllouts": {Group: "apps.cloudrobotics.com", Version: "v1alpha1", Resource: "approllouts"},
}
// TestSynk_applyAllIsUpdatingResources checks that applying a manifest for a
// pre-existing ConfigMap results in exactly one update write that carries
// the ResourceSet owner reference and the last-applied annotation.
func TestSynk_applyAllIsUpdatingResources(t *testing.T) {
	// We have to use properly typed objects for strategic-merge-patch targets
	// as the patch operation will fail otherwise.
	var cmBefore, cmUpdate corev1.ConfigMap
	unmarshalYAML(t, &cmBefore, `
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: foo1
  name: cm1
data:
  foo1: bar1
  foo2: bar2`)

	f := newFixture(t)
	// cm1 already exists beforehand, so we expect an update.
	f.addObjects(&cmBefore)

	unmarshalYAML(t, &cmUpdate, `
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: foo1
  name: cm1
data:
  foo2: baz2
  foo3: bar3`)
	cm := toUnstructured(t, &cmUpdate)

	set := &apps.ResourceSet{}
	set.Name = "test.v1"
	set.UID = "deadbeef"

	// Note: We can't test applying an Unstructured object here, as the
	// fake client doesn't support strategic merge patches:
	// https://github.com/kubernetes/client-go/issues/613
	results, err := f.newSynk().applyAll(context.Background(), set, &ApplyOptions{name: "test"},
		cm.DeepCopy(),
	)
	if err != nil {
		t.Error(err)
		for _, res := range results {
			t.Log(res)
		}
		return
	}
	// The expected written object carries the owner ref and applied annotation.
	_true := true
	ownerRef := metav1.OwnerReference{
		APIVersion:         "apps.cloudrobotics.com/v1alpha1",
		Kind:               "ResourceSet",
		Name:               set.Name,
		UID:                set.UID,
		BlockOwnerDeletion: &_true,
	}
	cm.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
	setAppliedAnnotation(cm)

	f.expectActions(
		k8stest.NewUpdateAction(gvrs["configmaps"], "foo1", cm),
	)
	f.verifyWriteActions()
}
// TestSynk_applyAllIsCreatingResources checks that resources absent from the
// cluster are created with the ResourceSet owner reference and the
// last-applied annotation attached.
func TestSynk_applyAllIsCreatingResources(t *testing.T) {
	f := newFixture(t)
	// Support for a standard merge-patch was only added to the client-go testing
	// mock as of kubernetes-1.14.0, which we cannot upgrade to yet.
	// Thus we cannot valid the standard merge patch type for CRDs yet.
	rollout := newUnstructured("apps.cloudrobotics.com/v1alpha1", "AppRollout", "foo1", "rollout1")
	deploy := newUnstructured("apps/v1", "Deployment", "foo2", "dp1")

	set := &apps.ResourceSet{}
	set.Name = "test.v1"
	set.UID = "deadbeef"

	results, err := f.newSynk().applyAll(context.Background(), set, &ApplyOptions{name: "test"},
		rollout.DeepCopy(),
		deploy.DeepCopy(),
	)
	if err != nil {
		t.Error(err)
		for _, res := range results {
			t.Log(res)
		}
		return
	}
	// The created objects are expected to carry the owner ref and annotation.
	_true := true
	ownerRef := metav1.OwnerReference{
		APIVersion:         "apps.cloudrobotics.com/v1alpha1",
		Kind:               "ResourceSet",
		Name:               set.Name,
		UID:                set.UID,
		BlockOwnerDeletion: &_true,
	}
	rollout.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
	deploy.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
	setAppliedAnnotation(deploy)
	setAppliedAnnotation(rollout)

	f.expectActions(
		k8stest.NewCreateAction(gvrs["approllouts"], "foo1", rollout),
		k8stest.NewCreateAction(gvrs["deployments"], "foo2", deploy),
	)
	f.verifyWriteActions()
}
// TestSynk_applyAllRetriesResourceExpired checks that a ResourceExpired error
// from create, patch, or update is treated as transient: applyAll retries the
// write exactly once before giving up.
func TestSynk_applyAllRetriesResourceExpired(t *testing.T) {
	// deploy is the input to applyAll(), annotatedDeploy is the expected output.
	// TODO(rodrigoq): change verifyWriteActions() to avoid this boilerplate
	deploy := newUnstructured("apps/v1", "Deployment", "foo1", "dp1")
	annotatedDeploy := deploy.DeepCopy()
	set := &apps.ResourceSet{}
	set.Name = "test.v1"
	set.UID = "deadbeef"
	_true := true
	ownerRef := metav1.OwnerReference{
		APIVersion:         "apps.cloudrobotics.com/v1alpha1",
		Kind:               "ResourceSet",
		Name:               set.Name,
		UID:                set.UID,
		BlockOwnerDeletion: &_true,
	}
	annotatedDeploy.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
	setAppliedAnnotation(annotatedDeploy)
	emptyPatch := []byte(`{}`)

	tests := []struct {
		desc    string
		verb    string
		objects []runtime.Object
		actions []k8stest.Action
	}{{
		desc:    "create deployment returns ResourceExpired",
		verb:    "create",
		objects: []runtime.Object{},
		actions: []k8stest.Action{
			// The same action appears twice: the initial attempt and the retry.
			k8stest.NewCreateAction(gvrs["deployments"], "foo1", annotatedDeploy),
			k8stest.NewCreateAction(gvrs["deployments"], "foo1", annotatedDeploy),
		},
	}, {
		desc:    "patch deployment returns ResourceExpired",
		verb:    "patch",
		objects: []runtime.Object{annotatedDeploy},
		actions: []k8stest.Action{
			k8stest.NewPatchAction(gvrs["deployments"], "foo1", "dp1", types.StrategicMergePatchType, emptyPatch),
			k8stest.NewPatchAction(gvrs["deployments"], "foo1", "dp1", types.StrategicMergePatchType, emptyPatch),
		},
	}, {
		desc:    "update deployment returns ResourceExpired",
		verb:    "update",
		objects: []runtime.Object{deploy},
		actions: []k8stest.Action{
			k8stest.NewUpdateAction(gvrs["deployments"], "foo1", annotatedDeploy),
			k8stest.NewUpdateAction(gvrs["deployments"], "foo1", annotatedDeploy),
		},
	}}
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			f := newFixture(t)
			f.addObjects(tc.objects...)
			s := f.newSynk()
			// ResourceExpired should be treated as a transient error.
			f.fake.PrependReactor(tc.verb, "deployments", func(action k8stest.Action) (bool, runtime.Object, error) {
				return true, nil, k8serrors.NewResourceExpired("gone")
			})
			_, err := s.applyAll(context.Background(), set, &ApplyOptions{name: "test"},
				deploy.DeepCopy(),
			)
			if err == nil {
				t.Error("applyAll() succeeded unexpectedly, want ResourceExpired")
			}
			// applyAll() should retry once before failing.
			f.expectActions(tc.actions...)
			f.verifyWriteActions()
		})
	}
}
// TestSynk_skipsTestResources checks that resources annotated with a helm
// "test-*" hook are excluded from the created ResourceSet spec.
func TestSynk_skipsTestResources(t *testing.T) {
	ctx := context.Background()
	s := newFixture(t).newSynk()

	testPod := newUnstructured("v1", "Pod", "ns", "pod2")
	testPod.SetAnnotations(map[string]string{"helm.sh/hook": "test-success"})
	_, _, err := s.initialize(context.Background(), &ApplyOptions{name: "test"},
		newUnstructured("v1", "Pod", "ns", "pod1"),
		testPod,
	)
	if err != nil {
		t.Fatal(err)
	}
	got, err := s.client.Resource(resourceSetGVR).Get(ctx, "test.v1", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	// Only pod1 should appear; the hook-annotated pod2 is skipped.
	var want unstructured.Unstructured
	unmarshalYAML(t, &want, `
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: ResourceSet
metadata:
  labels:
    name: test
  name: test.v1
spec:
  resources:
  - version: v1
    kind: Pod
    items:
    - name: pod1
      namespace: ns
status:
  phase: Pending
`)
	if !reflect.DeepEqual(want.Object["spec"], got.Object["spec"]) {
		t.Errorf("expected spec\n%v\nbut got\n%v", want.Object["spec"], got.Object["spec"])
	}
}
// TestSynk_deleteResourceSets checks that versions of the named ResourceSet
// below the given cutoff are deleted, while newer versions, sets with other
// names, and sets with unparsable versions are left untouched.
func TestSynk_deleteResourceSets(t *testing.T) {
	ctx := context.Background()
	newSet := func(name, version string) *unstructured.Unstructured {
		set := newUnstructured("apps.cloudrobotics.com/v1alpha1", "ResourceSet", "", name+"."+version)
		set.SetLabels(map[string]string{"name": name})
		return set
	}
	f := newFixture(t)
	f.addObjects(
		newSet("test", "v2"),
		newSet("bad_name", ""),
		newSet("other", "v3"),
		newSet("test", "v4"),
		newSet("test", "v7"),
		newSet("test", "v8"),
	)
	if err := f.newSynk().deleteResourceSets(ctx, "test", 7); err != nil {
		t.Fatal(err)
	}
	// Only test.v2 and test.v4 are named "test" AND older than version 7.
	f.expectActions(
		k8stest.NewRootDeleteAction(resourceSetGVR, "test.v2"),
		k8stest.NewRootDeleteAction(resourceSetGVR, "test.v4"),
	)
	f.verifyWriteActions()
}
// TestSynk_deleteFailedResourceSets checks that, below the version cutoff,
// only ResourceSets in phase "Failed" get deleted.
func TestSynk_deleteFailedResourceSets(t *testing.T) {
	ctx := context.Background()
	// nu builds a ResourceSet fixture named <name>.<version>, optionally
	// marked with status.phase=Failed.
	nu := func(name, version string, failed bool) *unstructured.Unstructured {
		u := newUnstructured("apps.cloudrobotics.com/v1alpha1", "ResourceSet", "", name+"."+version)
		u.SetLabels(map[string]string{"name": name})
		if failed {
			// Previously this error was silently dropped; a typo in the field
			// path would have made the test pass vacuously.
			if err := unstructured.SetNestedField(u.Object, "Failed", "status", "phase"); err != nil {
				t.Fatalf("set status.phase on %s.%s: %s", name, version, err)
			}
		}
		return u
	}
	f := newFixture(t)
	f.addObjects(
		nu("test", "v2", true),
		nu("test", "v4", false),
		nu("test", "v6", true),
		nu("test", "v7", true),
		nu("test", "v8", true),
	)
	synk := f.newSynk()
	err := synk.deleteFailedResourceSets(ctx, "test", 7)
	if err != nil {
		t.Fatal(err)
	}
	// v4 is old but not failed; v7/v8 are failed but not below the cutoff.
	f.expectActions(
		k8stest.NewRootDeleteAction(resourceSetGVR, "test.v2"),
		k8stest.NewRootDeleteAction(resourceSetGVR, "test.v6"),
	)
	f.verifyWriteActions()
}
// TestSynk_populateNamespaces verifies namespace defaulting: cluster-scoped
// resources stay namespace-free, resources with an explicit namespace keep
// it, and namespaced resources without one receive the given default ("ns2").
// The namespaced-ness of the custom resource is derived from the passed CRD.
func TestSynk_populateNamespaces(t *testing.T) {
	f := newFixture(t)
	s := f.newSynk()
	var (
		ns1  = newUnstructured("v1", "Namespace", "", "ns1")
		pod1 = newUnstructured("v1", "Pod", "ns1", "pod1")
		pod2 = newUnstructured("v1", "Pod", "", "pod1")
		cr1  = newUnstructured("example.org/v1", "Example", "", "pod1")
	)
	var exampleCRD unstructured.Unstructured
	// NOTE(review): "CustomResourceDefition" below is misspelled. Left
	// unchanged since the test works as-is, which suggests populateNamespaces
	// does not key off this field — confirm before fixing.
	unmarshalYAML(t, &exampleCRD, `
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefition
metadata:
  name: examples.example.org
spec:
  group: example.org
  names:
    kind: Example
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true`)
	if err := s.populateNamespaces(
		context.Background(),
		"ns2",
		[]*unstructured.Unstructured{&exampleCRD},
		ns1, pod1, pod2, cr1,
	); err != nil {
		t.Fatal(err)
	}
	if ns1.GetNamespace() != "" {
		t.Errorf("unexpected namespace %q added to ns1", ns1.GetNamespace())
	}
	if pod1.GetNamespace() != "ns1" {
		t.Errorf("unexpected namespace change to %q on pod1", pod1.GetNamespace())
	}
	// Typo fix in the messages below: "namesapce" -> "namespace".
	if pod2.GetNamespace() != "ns2" {
		t.Errorf("unexpected namespace %q on pod2", pod2.GetNamespace())
	}
	if cr1.GetNamespace() != "ns2" {
		t.Errorf("unexpected namespace %q on cr1", cr1.GetNamespace())
	}
}
// TestSynk_skipLastAppliedAnnotationForLargeResource checks that attempting
// to record the last-applied annotation on a resource exceeding the
// annotation size limit fails and leaves no annotation behind.
func TestSynk_skipLastAppliedAnnotationForLargeResource(t *testing.T) {
	manifest := `
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: foo1
  name: cm1
data:
  foo1: ` + strings.Repeat("x", totalAnnotationSizeLimitB)

	var cmLarge unstructured.Unstructured
	unmarshalYAML(t, &cmLarge, manifest)

	err := setAppliedAnnotation(&cmLarge)
	if err == nil {
		t.Errorf("expected error setting large annotation, got nil")
	}
	if applied := getAppliedAnnotation(&cmLarge); len(applied) > 0 {
		t.Errorf("expected no last-applied annotation set, but got %v", applied)
	}
}
// newUnstructured builds a minimal unstructured object with the given
// apiVersion, kind, namespace, and name.
func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
	u := &unstructured.Unstructured{}
	u.SetAPIVersion(apiVersion)
	u.SetKind(kind)
	u.SetNamespace(namespace)
	u.SetName(name)
	return u
}
// unmarshalYAML decodes the whitespace-trimmed YAML string s into v,
// failing the test immediately on parse errors.
func unmarshalYAML(t *testing.T, v interface{}, s string) {
	t.Helper()
	trimmed := strings.TrimSpace(s)
	if err := yaml.Unmarshal([]byte(trimmed), v); err != nil {
		t.Fatal(err)
	}
}
// toUnstructured converts any runtime.Object into an unstructured
// representation, failing the test on conversion errors.
func toUnstructured(t *testing.T, o runtime.Object) *unstructured.Unstructured {
	u := &unstructured.Unstructured{}
	if err := convert(o, u); err != nil {
		t.Fatal(err)
	}
	return u
}
// filterReadActions drops read-only actions (get/list/watch) that are
// irrelevant when verifying write behavior.
func filterReadActions(actions []k8stest.Action) (ret []k8stest.Action) {
	readVerbs := map[string]bool{"watch": true, "list": true, "get": true}
	for _, a := range actions {
		if !readVerbs[a.GetVerb()] {
			ret = append(ret, a)
		}
	}
	return ret
}
// sprintAction renders a fake-client action as a human-readable one-liner
// for test failure messages.
func sprintAction(a k8stest.Action) string {
	switch v := a.(type) {
	case k8stest.DeleteActionImpl:
		return fmt.Sprintf("DELETE %s/%s %s/%s", v.Resource, v.Subresource, v.Namespace, v.Name)
	case k8stest.CreateActionImpl:
		return fmt.Sprintf("CREATE %s/%s %s/%s: %v", v.Resource, v.Subresource, v.Namespace, v.Name, v.Object.(*unstructured.Unstructured))
	case k8stest.UpdateActionImpl:
		return fmt.Sprintf("UPDATE %s/%s %s: %v", v.Resource, v.Subresource, v.Namespace, v.Object.(*unstructured.Unstructured))
	case k8stest.PatchActionImpl:
		return fmt.Sprintf("PATCH %s/%s %s/%s: %s %s", v.Resource, v.Subresource, v.Namespace, v.Name, v.PatchType, v.Patch)
	default:
		// Bug fix: previously fmt.Sprintf("", a) — no format verb, so the
		// action was dropped entirely (and `go vet` flags the extra arg).
		return fmt.Sprintf("UNKNOWN %T: %v", a, a)
	}
}
================================================
FILE: src/go/tests/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@rules_shell//shell:sh_test.bzl", "sh_test")

# Integration test that checks pod and AppRollout health in already-deployed
# cloud and robot clusters. Tagged "external" since it needs access to real
# clusters (kubeconfig contexts) rather than a hermetic sandbox.
go_test(
    name = "go_default_test",
    size = "large",
    timeout = "long",
    srcs = ["k8s_integration_test.go"],
    embed = [":go_default_library"],
    tags = ["external"],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/kubeutils:go_default_library",
        "@com_github_googlecloudrobotics_ilog//:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime:go_default_library",
        "@io_k8s_apimachinery//pkg/runtime/schema:go_default_library",
        "@io_k8s_client_go//kubernetes:go_default_library",
        "@io_k8s_client_go//kubernetes/scheme:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/client:go_default_library",
    ],
)

# Shell-based relay test; needs the helm binary at runtime.
sh_test(
    name = "relay_test",
    srcs = ["relay_test.sh"],
    data = [
        "@kubernetes_helm//:helm",
    ],
    tags = ["external"],
)

# Helper library embedding the (removed upstream) GCP auth provider plugin so
# the integration test can authenticate against GKE.
go_library(
    name = "go_default_library",
    srcs = ["k8s_integration_test_auth_helper.go"],
    importpath = "github.com/googlecloudrobotics/core/src/go/tests",
    visibility = ["//visibility:public"],
    deps = [
        "@io_k8s_apimachinery//pkg/util/net:go_default_library",
        "@io_k8s_apimachinery//pkg/util/yaml:go_default_library",
        "@io_k8s_client_go//rest:go_default_library",
        "@io_k8s_client_go//util/jsonpath:go_default_library",
        "@io_k8s_klog_v2//:go_default_library",
        "@org_golang_x_oauth2//:go_default_library",
        "@org_golang_x_oauth2//google:go_default_library",
    ],
)
================================================
FILE: src/go/tests/apps/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_test")

# ChartAssignment end-to-end test driven by kind clusters. Marked "manual"
# (not run in the default wildcard) because it needs Docker and an access
# token; see run.sh next to this file for how to invoke it.
go_test(
    name = "go_default_test",
    size = "large",
    timeout = "long",
    srcs = ["apps_test.go"],
    data = [
        "//src/app_charts/base:base-test",
        "//src/go/cmd/synk",
        "@kubernetes_helm//:helm",
    ],
    rundir = ".",
    tags = [
        "manual",
        "requires-access-token",
        "requires-docker",
    ],
    deps = [
        "//src/go/pkg/apis/apps/v1alpha1:go_default_library",
        "//src/go/pkg/kubetest:go_default_library",
        "@com_github_cenkalti_backoff//:go_default_library",
        "@io_k8s_api//apps/v1:go_default_library",
        "@io_k8s_api//core/v1:go_default_library",
        "@io_k8s_apimachinery//pkg/api/errors:go_default_library",
        "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
        "@io_k8s_sigs_controller_runtime//pkg/client:go_default_library",
    ],
)
================================================
FILE: src/go/tests/apps/apps_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apps
import (
"os"
"testing"
"time"
"github.com/cenkalti/backoff"
crcapps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/kubetest"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// robotClusterName is the name of the single kind cluster the test
	// fixture creates; every ChartAssignment in this file targets it.
	robotClusterName = "robot"

	// inlineChartTemplate is a Go template for a ChartAssignment whose chart
	// payload is supplied inline via {{ .chart }} (built by
	// kubetest.BuildInlineChart).
	inlineChartTemplate = `
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: ChartAssignment
metadata:
  name: {{ .name }}
  namespace: default
spec:
  clusterName: {{ .cluster }}
  namespaceName: {{ .namespace }}
  chart:
    inline: "{{ .chart }}"`

	// goodDeployment is a healthy single-replica deployment whose selector
	// matches the pod template labels, so it can become available.
	goodDeployment = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: test
    spec:
      containers:
      - name: test
        image: "gcr.io/google-containers/busybox:latest"
        args: ["sleep", "999999999"]`

	// deploymentWithBadLabels has template labels that do not match the
	// selector, so the apiserver/controller can never satisfy it — used to
	// drive a ChartAssignment into the Failed phase.
	deploymentWithBadLabels = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: this-label-does-not-match
    spec:
      containers:
      - name: test
        image: "gcr.io/google-containers/busybox:latest"
        args: ["false"]`

	// badJob runs `false`, i.e. exits non-zero, so the Job never succeeds.
	badJob = `
apiVersion: batch/v1
kind: Job
metadata:
  name: test
spec:
  template:
    metadata:
      labels:
        app: test
    spec:
      restartPolicy: Never
      containers:
      - name: test
        image: "gcr.io/google-containers/busybox:latest"
        args: ["false"]`

	// goodJob runs `true` and completes successfully.
	goodJob = `
apiVersion: batch/v1
kind: Job
metadata:
  name: test
spec:
  template:
    metadata:
      labels:
        app: test
    spec:
      restartPolicy: Never
      containers:
      - name: test
        image: "gcr.io/google-containers/busybox:latest"
        args: ["true"]`
)
// TestAll brings up a kind-based robot cluster, installs the base-test chart,
// waits for the chart-assignment-controller to become ready, and then runs
// the individual ChartAssignment test cases against the shared fixture.
func TestAll(t *testing.T) {
	env := kubetest.New(t, kubetest.Config{
		Clusters: []kubetest.ClusterConfig{
			{Name: robotClusterName},
		},
		SchemeFunc: crcapps.AddToScheme,
	})
	defer env.Teardown()

	chartValues := map[string]string{
		"project":         "",
		"robot.name":      robotClusterName,
		"registry":        os.Getenv("REGISTRY"),
		"webhook.enabled": "false",
	}
	env.InstallChartArchive(
		robotClusterName, "base-test", "default",
		"src/app_charts/base/base-test-0.0.1.tgz",
		chartValues,
	)

	// The controller must be up before any ChartAssignment can make progress.
	waitForController := func() error {
		return kubetest.DeploymentReady(env.Ctx(), env.Client(robotClusterName), "default", "chart-assignment-controller")
	}
	err := backoff.Retry(
		waitForController,
		backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 40),
	)
	if err != nil {
		t.Errorf("wait for chart-assignment-controller: %s", err)
		t.Fatalf("maybe REGISTRY or ACCESS_TOKEN is not set?")
	}

	env.Run(
		testCreateChartAssignment_WithChartReference_Works,
		testCreateChartAssignment_WithInlineChart_BecomesReady,
		testCreateChartAssignment_WithBadDeployment_BecomesFailed,
		testUpdateChartAssignment_WithFixedDeployment_BecomesReady,
		testUpdateChartAssignment_WithFixedJob_BecomesReady,
		testCreateChartAssignment_CopiesLabelledSecret,
	)
}
// testCreateChartAssignment_WithChartReference_Works creates a
// ChartAssignment that pulls its chart from a remote Helm repository
// (oauth2-proxy) and verifies that it reaches phase Settled, that the chart's
// Deployment appears in the assignment's namespace, and that the spec was
// observed exactly once.
func testCreateChartAssignment_WithChartReference_Works(t *testing.T, f *kubetest.Fixture) {
	robot := f.Client(robotClusterName)
	// for the version, run:
	//   helm repo add oauth2-proxy https://oauth2-proxy.github.io/manifests
	//   helm repo update
	//   helm search oauth2-proxy/oauth2-proxy -l
	// Important: we need a V1 chart!
	tmpl := `
apiVersion: apps.cloudrobotics.com/v1alpha1
kind: ChartAssignment
metadata:
  name: {{ .name }}
  namespace: default
spec:
  clusterName: {{ .cluster }}
  namespaceName: {{ .namespace }}
  chart:
    repository: https://oauth2-proxy.github.io/manifests
    name: oauth2-proxy
    version: 3.2.6
    values:
      fullnameOverride: test
`
	// Uniq suffixes keep parallel/repeated test runs from colliding.
	data := map[string]string{
		"cluster":   robotClusterName,
		"name":      f.Uniq("example"),
		"namespace": f.Uniq("ns"),
	}
	var ca crcapps.ChartAssignment
	f.FromYAML(tmpl, data, &ca)
	if err := robot.Create(f.Ctx(), &ca); err != nil {
		t.Fatalf("create ChartAssignment: %s", err)
	}
	if err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseSettled),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("wait for chart assignment settled: %s", err)
	}
	// We should find a deployment in its own namespace now.
	// (fullnameOverride above forces the deployment name to "test".)
	var dep apps.Deployment
	if err := robot.Get(f.Ctx(), client.ObjectKey{
		Namespace: data["namespace"],
		Name:      "test",
	}, &dep); err != nil {
		t.Errorf("failed to get deployment: %s", err)
	}
	// Chart should've been deployed exactly once.
	if want, got := int64(1), ca.Status.ObservedGeneration; want != got {
		t.Errorf("want ca.Status.ObservedGeneration == %d, got %d", want, got)
	}
}
// testCreateChartAssignment_WithInlineChart_BecomesReady creates a
// ChartAssignment from an inline chart containing a healthy Deployment and
// expects it to reach phase Ready.
func testCreateChartAssignment_WithInlineChart_BecomesReady(t *testing.T, f *kubetest.Fixture) {
	robot := f.Client(robotClusterName)

	var ca crcapps.ChartAssignment
	f.FromYAML(inlineChartTemplate, map[string]string{
		"cluster":   robotClusterName,
		"name":      f.Uniq("example"),
		"namespace": f.Uniq("ns"),
		"chart":     kubetest.BuildInlineChart(t, "example", goodDeployment /*values=*/, ""),
	}, &ca)

	if err := robot.Create(f.Ctx(), &ca); err != nil {
		t.Fatalf("create ChartAssignment: %s", err)
	}

	err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseReady),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	)
	if err != nil {
		t.Fatalf("wait for chart assignment ready: %s", err)
	}
}
// testCreateChartAssignment_WithBadDeployment_BecomesFailed creates a
// ChartAssignment whose Deployment can never become available (selector does
// not match the template labels) and expects it to reach phase Failed.
func testCreateChartAssignment_WithBadDeployment_BecomesFailed(t *testing.T, f *kubetest.Fixture) {
	robot := f.Client(robotClusterName)

	var ca crcapps.ChartAssignment
	f.FromYAML(inlineChartTemplate, map[string]string{
		"cluster":   robotClusterName,
		"name":      f.Uniq("example"),
		"namespace": f.Uniq("ns"),
		"chart":     kubetest.BuildInlineChart(t, "example", deploymentWithBadLabels /*values=*/, ""),
	}, &ca)

	if err := robot.Create(f.Ctx(), &ca); err != nil {
		t.Fatalf("create ChartAssignment: %s", err)
	}

	err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseFailed),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	)
	if err != nil {
		t.Fatalf("wait for chart assignment to fail: %s", err)
	}
}
// testUpdateChartAssignment_WithFixedDeployment_BecomesReady first installs a
// chart whose Deployment can never become available (mismatched selector
// labels), waits for the assignment to reach phase Failed, then swaps in the
// good deployment and expects the phase to recover to Ready.
func testUpdateChartAssignment_WithFixedDeployment_BecomesReady(t *testing.T, f *kubetest.Fixture) {
	robot := f.Client(robotClusterName)
	// First, create a bad ChartAssignment and wait for it to fail.
	data := map[string]string{
		"cluster":   robotClusterName,
		"name":      f.Uniq("example"),
		"namespace": f.Uniq("ns"),
		"chart":     kubetest.BuildInlineChart(t, "example", deploymentWithBadLabels /*values=*/, ""),
	}
	var ca crcapps.ChartAssignment
	f.FromYAML(inlineChartTemplate, data, &ca)
	if err := robot.Create(f.Ctx(), &ca); err != nil {
		t.Fatalf("create ChartAssignment: %s", err)
	}
	if err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseFailed),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("wait for chart assignment to fail: %s", err)
	}
	// Next, fix the ChartAssignment and wait for it to become ready. We have
	// to retry the read-modify-write in case the controller updates the status
	// in parallel.
	if err := backoff.Retry(func() error {
		if err := robot.Get(f.Ctx(), f.ObjectKey(&ca), &ca); err != nil {
			return backoff.Permanent(err)
		}
		ca.Spec.Chart.Inline = kubetest.BuildInlineChart(t, "example", goodDeployment /*values=*/, "")
		// Optimistic-concurrency conflicts are retriable; any other error
		// aborts the retry loop immediately.
		if err := robot.Update(f.Ctx(), &ca); apierrors.IsConflict(err) {
			return err
		} else if err != nil {
			return backoff.Permanent(err)
		}
		return nil
	}, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("update ChartAssignment: %s %s", apierrors.ReasonForError(err), err)
	}
	if err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseReady),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("wait for chart assignment to go from Failed to Ready: %s", err)
	}
}
// testUpdateChartAssignment_WithFixedJob_BecomesReady first installs a chart
// whose Job always exits non-zero, waits for the assignment to reach phase
// Settled (the chart applies even though the Job fails), then swaps in the
// succeeding Job and expects the phase to advance to Ready.
func testUpdateChartAssignment_WithFixedJob_BecomesReady(t *testing.T, f *kubetest.Fixture) {
	robot := f.Client(robotClusterName)
	// First, create a ChartAssignment with a bad job and wait for it to settle.
	data := map[string]string{
		"cluster":   robotClusterName,
		"name":      f.Uniq("example"),
		"namespace": f.Uniq("ns"),
		"chart":     kubetest.BuildInlineChart(t, "example", badJob /*values=*/, ""),
	}
	var ca crcapps.ChartAssignment
	f.FromYAML(inlineChartTemplate, data, &ca)
	if err := robot.Create(f.Ctx(), &ca); err != nil {
		t.Fatalf("create ChartAssignment: %s", err)
	}
	if err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseSettled),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("wait for chart assignment to settle: %s", err)
	}
	// Next, change the ChartAssignment and wait for it to become ready. We have
	// to retry the read-modify-write in case the controller updates the status
	// in parallel.
	if err := backoff.Retry(func() error {
		if err := robot.Get(f.Ctx(), f.ObjectKey(&ca), &ca); err != nil {
			return backoff.Permanent(err)
		}
		ca.Spec.Chart.Inline = kubetest.BuildInlineChart(t, "example", goodJob /*values=*/, "")
		// Optimistic-concurrency conflicts are retriable; any other error
		// aborts the retry loop immediately.
		if err := robot.Update(f.Ctx(), &ca); apierrors.IsConflict(err) {
			return err
		} else if err != nil {
			return backoff.Permanent(err)
		}
		return nil
	}, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("update ChartAssignment: %s %s", apierrors.ReasonForError(err), err)
	}
	if err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseReady),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(3*time.Second), 60),
	); err != nil {
		t.Fatalf("wait for chart assignment to go from Settled to Ready: %s", err)
	}
}
// testCreateChartAssignment_CopiesLabelledSecret verifies that a Secret in
// the "default" namespace carrying the copy-to-chart-namespaces label gets
// copied into the namespace created for a new ChartAssignment.
func testCreateChartAssignment_CopiesLabelledSecret(t *testing.T, f *kubetest.Fixture) {
	robot := f.Client(robotClusterName)
	const secretName = "my-secret"

	labelledSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: "default",
			Labels: map[string]string{
				"cloudrobotics.com/copy-to-chart-namespaces": "true",
			},
		},
		Type: corev1.SecretTypeOpaque,
	}
	if err := robot.Create(f.Ctx(), labelledSecret); err != nil {
		t.Fatalf("create Secret default/my-secret: %s", err)
	}

	data := map[string]string{
		"cluster":   robotClusterName,
		"name":      f.Uniq("example"),
		"namespace": f.Uniq("ns"),
		"chart":     kubetest.BuildInlineChart(t, "example", goodDeployment /*values=*/, ""),
	}
	var ca crcapps.ChartAssignment
	f.FromYAML(inlineChartTemplate, data, &ca)
	if err := robot.Create(f.Ctx(), &ca); err != nil {
		t.Fatalf("create ChartAssignment: %s", err)
	}
	if err := backoff.Retry(
		f.ChartAssignmentHasStatus(&ca, crcapps.ChartAssignmentPhaseSettled),
		backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),
	); err != nil {
		t.Fatalf("wait for chart assignment settled: %s", err)
	}
	// A copy of the secret should now exist in the chart's namespace.
	var copied corev1.Secret
	if err := robot.Get(f.Ctx(), client.ObjectKey{
		Namespace: data["namespace"],
		Name:      secretName,
	}, &copied); err != nil {
		t.Errorf("failed to get Secret %s/%s: %s", data["namespace"], secretName, err)
	}
}
================================================
FILE: src/go/tests/apps/run.sh
================================================
#!/usr/bin/env bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Pushes the app charts to a registry, builds the apps integration test, and
# runs it directly (outside "bazel test") so the kind clusters remain
# reachable for debugging.
#
# NOTE: if you are running a minikube using vmdriver=none on the same machine
# this test (or more precisely kind) does not seem to work - see
# https://github.com/kubernetes-sigs/kind/issues/2516

set -e

# Grammar/placeholder fixes: the messages previously read "for preserve" and
# "docker stop " with the container placeholder missing.
echo "Set NO_TEARDOWN=y to preserve \"kind\" clusters past test failures"
echo "run 'docker ps | grep kind' and 'docker stop <container>' to clean up when done"

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# The registry comes from the environment or the first positional argument.
CLOUD_ROBOTICS_CONTAINER_REGISTRY=${CLOUD_ROBOTICS_CONTAINER_REGISTRY:-$1}
if [[ -z "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}" ]]; then
  echo "Usage: $0 <container-registry>"
  exit 1
fi

TAG="latest" bazel run //src/app_charts:push "${CLOUD_ROBOTICS_CONTAINER_REGISTRY}"
bazel build //src/go/tests/apps:go_default_test

# Run the artifact directly so the kind clusters can be easily accessed for
# debugging. Quote the path in case the checkout directory contains spaces.
cd "${DIR}/../../../../bazel-bin/src/go/tests/apps/go_default_test_/go_default_test.runfiles/_main"
ACCESS_TOKEN="$(gcloud auth application-default print-access-token)" \
  REGISTRY="${CLOUD_ROBOTICS_CONTAINER_REGISTRY}" \
  ../../go_default_test
================================================
FILE: src/go/tests/k8s_integration_test.go
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"testing"
"time"
apps "github.com/googlecloudrobotics/core/src/go/pkg/apis/apps/v1alpha1"
"github.com/googlecloudrobotics/core/src/go/pkg/kubeutils"
"github.com/googlecloudrobotics/ilog"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// appInitializationTimeout bounds how long TestCloudClusterAppStatus
	// polls for all AppRollout conditions to become true.
	appInitializationTimeout = 7 * time.Minute
	// podInitializationTimeout bounds how long checkHealthOfKubernetesCluster
	// polls for all pods to reach Running/Succeeded.
	podInitializationTimeout = 5 * time.Minute
)
// checkHealthOfKubernetesCluster polls all pods in the cluster identified by
// kubernetesContext until every pod is Running or Succeeded and no container
// is failing, or until podInitializationTimeout elapses.
//
// A container counts as failing if its restart count grew since the previous
// poll or if it terminated with a non-zero exit code; a Waiting container
// marks its pod as not yet running. Returns nil once the cluster looks
// healthy, or an error describing the remaining unhealthy pods/containers on
// timeout.
func checkHealthOfKubernetesCluster(ctx context.Context, kubernetesContext string) error {
	// create the kubernetes clientSet
	k8sCfg, err := kubeutils.LoadOutOfClusterConfig(kubernetesContext)
	if err != nil {
		return fmt.Errorf("Loading of kubernetes config failed: %v", err)
	}
	clientSet, err := kubernetes.NewForConfig(k8sCfg)
	if err != nil {
		return fmt.Errorf("Creating the kubernetes client set failed: %v", err)
	}
	numNonRunningPods := 0
	failingContainers := 0
	// restartCount remembers the restart count per pod+container across poll
	// iterations so only *new* restarts are counted as failures.
	restartCount := make(map[string]int32)
	timeStart := time.Now()
	for time.Since(timeStart) < podInitializationTimeout {
		slog.Info("Querying pods...", slog.String("Context", kubernetesContext))
		pods, err := clientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
		if err != nil {
			return fmt.Errorf("Failed to query pods: %v", err)
		}
		slog.Info("...done.", slog.Int("PodCount", len(pods.Items)))
		if len(pods.Items) == 0 {
			return fmt.Errorf("Could not find any pods in cluster")
		}
		// Reset per-iteration counters: only the final iteration's counts
		// feed the verdict after the loop.
		numNonRunningPods = 0
		failingContainers = 0
		for _, pod := range pods.Items {
			slog.Info("Pod state", slog.String("Name", pod.Name), slog.String("Phase", string(pod.Status.Phase)))
			if pod.Status.Phase != "Running" && pod.Status.Phase != "Succeeded" {
				// One non-running pod is enough to call this iteration
				// unhealthy; skip scanning the remaining pods.
				numNonRunningPods += 1
				break
			}
			waitingContainerFound := false
			for _, container := range pod.Status.ContainerStatuses {
				// Exactly one of Running/Terminated/Waiting in container.State is set
				if container.State.Running != nil {
					restartKey := pod.Name + container.Name
					prevRestarts, ok := restartCount[restartKey]
					if !ok {
						prevRestarts = 0
					}
					// A grown restart count means the container crashed since
					// the last poll even though it is Running right now.
					if container.RestartCount > prevRestarts {
						slog.Warn("Container restarted",
							slog.String("Pod", pod.Name),
							slog.String("Container", container.Name),
							slog.String("Image", container.Image),
							slog.Int("RestartCount", int(container.RestartCount)))
						failingContainers += 1
					}
					restartCount[restartKey] = container.RestartCount
				} else if container.State.Terminated != nil && container.State.Terminated.ExitCode != 0 {
					slog.Warn("Container terminated",
						slog.String("Pod", pod.Name),
						slog.String("Container", container.Name),
						slog.String("Image", container.Image),
						slog.Int("RestartCount", int(container.RestartCount)))
					failingContainers += 1
				} else if container.State.Waiting != nil {
					slog.Warn("Container waiting",
						slog.String("Pod", pod.Name),
						slog.String("Container", container.Name),
						slog.String("Image", container.Image),
						slog.Int("RestartCount", int(container.RestartCount)))
					waitingContainerFound = true
				}
			}
			if waitingContainerFound {
				numNonRunningPods += 1
			}
		}
		if numNonRunningPods == 0 && failingContainers == 0 {
			break
		}
		time.Sleep(10 * time.Second)
	}
	if numNonRunningPods != 0 || failingContainers != 0 {
		return fmt.Errorf("Unhealthy cluster status after waiting for %d sec: %d non-running pods, %d failing containers\n",
			podInitializationTimeout/time.Second, numNonRunningPods, failingContainers)
	}
	slog.Info("All pods are happily running :)")
	return nil
}
// convert a resource from one type representation to another one by
// round-tripping it through JSON.
//
// Note that `to` is an interface value that typically holds a pointer (e.g.
// *apps.AppRollout); encoding/json's indirection follows the non-nil pointer
// inside the interface, so the caller's struct is populated in place.
func convert(from, to runtime.Object) error {
	b, err := json.Marshal(from)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, &to)
}
// TestCloudClusterAppStatus polls the cloud cluster until every AppRollout
// reports all of its status conditions as true, failing after
// appInitializationTimeout.
func TestCloudClusterAppStatus(t *testing.T) {
	kubernetesContext, err := kubeutils.GetCloudKubernetesContext()
	if err != nil {
		// Abort immediately: continuing with an empty context (previously
		// t.Error) only produces confusing follow-up failures.
		t.Fatal(err)
	}
	k8sCfg, err := kubeutils.LoadOutOfClusterConfig(kubernetesContext)
	if err != nil {
		t.Fatalf("Loading of kubernetes config failed: %v", err)
	}
	sc := runtime.NewScheme()
	// These errors were previously ignored; an unpopulated scheme surfaces
	// later as opaque "no kind registered" failures.
	if err := scheme.AddToScheme(sc); err != nil {
		t.Fatalf("Failed to add core types to scheme: %v", err)
	}
	if err := apps.AddToScheme(sc); err != nil {
		t.Fatalf("Failed to add app types to scheme: %v", err)
	}
	client, err := ctrlclient.New(k8sCfg, ctrlclient.Options{Scheme: sc})
	if err != nil {
		t.Fatalf("Failed to create kubernetes client: %v", err)
	}
	numBadConditions := 0
	timeStart := time.Now()
	for time.Since(timeStart) < appInitializationTimeout {
		// A fresh list each iteration; List appends to Items.
		appRollouts := &unstructured.UnstructuredList{}
		appRollouts.SetGroupVersionKind(schema.GroupVersionKind{
			Group:   "apps.cloudrobotics.com",
			Kind:    "AppRollout",
			Version: "v1alpha1",
		})
		slog.Info("Querying AppRollouts...", slog.String("Context", kubernetesContext))
		err = client.List(context.Background(), appRollouts)
		if err != nil {
			// Transient listing errors are retried until the timeout.
			slog.Error("Failed to list AppRollouts", ilog.Err(err))
			time.Sleep(10 * time.Second)
			continue
		}
		slog.Info("...done.", slog.Int("AppRolloutCount", len(appRollouts.Items)))
		numBadConditions = 0
		for _, i := range appRollouts.Items {
			ar := &apps.AppRollout{}
			if err := convert(&i, ar); err != nil {
				t.Errorf("Failed to unmarshall AppRollout: %v", err)
			}
			for _, c := range ar.Status.Conditions {
				slog.Info("AppRollout condition", slog.String("Name", ar.GetName()), slog.String("Condition", string(c.Type)), slog.String("Status", string(c.Status)))
				if c.Status != corev1.ConditionTrue {
					slog.Warn("AppRollout condition not met", slog.String("Name", ar.GetName()), slog.String("Condition", string(c.Type)))
					numBadConditions += 1
				}
			}
		}
		if numBadConditions == 0 {
			break
		}
		time.Sleep(15 * time.Second)
	}
	if numBadConditions != 0 {
		t.Errorf("Unhealthy AppRollout status after waiting for %d sec: %d conditions not met\n",
			appInitializationTimeout/time.Second, numBadConditions)
	}
}
// TestKubernetesCloudClusterStatus checks that all pods in the cloud cluster
// become healthy within the pod initialization timeout.
func TestKubernetesCloudClusterStatus(t *testing.T) {
	ctx := context.Background()
	kubernetesCloudContext, err := kubeutils.GetCloudKubernetesContext()
	if err != nil {
		// Abort: running the health check with an empty context (previously
		// t.Error) would only add a confusing secondary failure.
		t.Fatal(err)
	}
	if err := checkHealthOfKubernetesCluster(ctx, kubernetesCloudContext); err != nil {
		t.Errorf("Cloud cluster %s: %v", kubernetesCloudContext, err)
	}
}
// TestKubernetesRobotClusterStatus checks that all pods in the robot cluster
// become healthy within the pod initialization timeout.
func TestKubernetesRobotClusterStatus(t *testing.T) {
	ctx := context.Background()
	kubernetesRobotContext, err := kubeutils.GetRobotKubernetesContext()
	if err != nil {
		// Abort: running the health check with an empty context (previously
		// t.Error) would only add a confusing secondary failure.
		t.Fatal(err)
	}
	if err := checkHealthOfKubernetesCluster(ctx, kubernetesRobotContext); err != nil {
		t.Errorf("Robot cluster %s: %v", kubernetesRobotContext, err)
	}
}
================================================
FILE: src/go/tests/k8s_integration_test_auth_helper.go
================================================
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// GIANT NOTE: methylDragon@
// This is a verbatim copy of the state of the Kubernetes auth provider before it was REMOVED in
// https://github.com/kubernetes/client-go/commit/7d208ba573ecc2c6294482b51872021718f76cb4
//
// With migration recommendation:
// https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
//
// Unfortunately, despite installing the package as per that recommendation in the cloudbuild machine,
// the test doesn't seem to accept the plugin. (It could be that it is running in a different machine.)
// So the plugin is reintroduced here to allow for GKE auth to proceed as part of a cloudbuild test.
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os/exec"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/yaml"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/jsonpath"
"k8s.io/klog/v2"
)
// init registers the legacy "gcp" auth provider so kubeconfig entries that
// still reference it keep working with this test binary (the plugin was
// removed from client-go upstream; see the file header).
func init() {
	if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
		klog.Fatalf("Failed to register gcp auth plugin: %v", err)
	}
}
var (
	// execCommand wraps exec.Command; stubbable for testing.
	execCommand = exec.Command

	// defaultScopes:
	//   - cloud-platform is the base scope to authenticate to GCP.
	//   - userinfo.email is used to authenticate to GKE APIs with gserviceaccount
	//     email instead of numeric uniqueID.
	defaultScopes = []string{
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/userinfo.email"}
)
// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide
// tokens for kubectl to authenticate itself to the apiserver. A sample json config
// is provided below with all recognized options described.
//
// {
//   "auth-provider": {
//     # Required
//     "name": "gcp",
//
//     "config": {
//       # Authentication options
//       # These options are used while getting a token.
//
//       # comma-separated list of GCP API scopes. default value of this field
//       # is "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/userinfo.email".
//       # to override the API scopes, specify this field explicitly.
//       "scopes": "https://www.googleapis.com/auth/cloud-platform"
//
//       # Caching options
//
//       # Raw string data representing cached access token.
//       "access-token": "ya29.CjWdA4GiBPTt",
//       # RFC3339Nano expiration timestamp for cached access token.
//       "expiry": "2016-10-31T22:31:09.123000000Z",
//
//       # Command execution options
//       # These options direct the plugin to execute a specified command and parse
//       # token and expiry time from the output of the command.
//
//       # Command to execute for access token. Command output will be parsed as JSON.
//       # If "cmd-args" is not present, this value will be split on whitespace, with
//       # the first element interpreted as the command, remaining elements as args.
//       "cmd-path": "/usr/bin/gcloud",
//
//       # Arguments to pass to command to execute for access token.
//       "cmd-args": "config config-helper --output=json"
//
//       # JSONPath to the string field that represents the access token in
//       # command output. If omitted, defaults to "{.access_token}".
//       "token-key": "{.credential.access_token}",
//
//       # JSONPath to the string field that represents expiration timestamp
//       # of the access token in the command output. If omitted, defaults to
//       # "{.token_expiry}"
//       "expiry-key": "{.credential.token_expiry}",
//
//       # golang reference time in the format that the expiration timestamp uses.
//       # If omitted, defaults to time.RFC3339Nano
//       "time-fmt": "2006-01-02 15:04:05.999999999"
//     }
//   }
// }
type gcpAuthProvider struct {
	tokenSource oauth2.TokenSource
	persister   restclient.AuthProviderConfigPersister
}
// warnOnce ensures the deprecation warning below is only logged one time per process.
var warnOnce sync.Once

// newGCPAuthProvider builds the gcp auth provider from the kubeconfig's
// auth-provider config map, wrapping the underlying token source (command- or
// application-default-credentials-based) with a persisting cache.
func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
	warnOnce.Do(func() {
		klog.Warningf(`WARNING: the gcp auth plugin is deprecated in v1.22+, unavailable in v1.26+; use gcloud instead.
To learn more, consult https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke`)
	})
	base, err := tokenSource(isCmdTokenSource(gcpConfig), gcpConfig)
	if err != nil {
		return nil, err
	}
	cached, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, base, gcpConfig)
	if err != nil {
		return nil, err
	}
	return &gcpAuthProvider{cached, persister}, nil
}
// isCmdTokenSource reports whether the config selects the command-based token
// source, i.e. whether a "cmd-path" key is present (even with an empty value).
func isCmdTokenSource(gcpConfig map[string]string) bool {
	if _, present := gcpConfig["cmd-path"]; present {
		return true
	}
	return false
}
// tokenSource constructs an oauth2.TokenSource from the plugin config. When
// isCmd is set the token is produced by executing an external command,
// otherwise Google Application Default Credentials are used.
func tokenSource(isCmd bool, gcpConfig map[string]string) (oauth2.TokenSource, error) {
	if !isCmd {
		// Google Application Credentials-based token source
		scopes := parseScopes(gcpConfig)
		ts, err := google.DefaultTokenSource(context.Background(), scopes...)
		if err != nil {
			return nil, fmt.Errorf("cannot construct google default token source: %v", err)
		}
		return ts, nil
	}
	// Command-based token source
	cmdPath := gcpConfig["cmd-path"]
	if len(cmdPath) == 0 {
		return nil, fmt.Errorf("missing access token cmd")
	}
	if gcpConfig["scopes"] != "" {
		// Scopes are only honored by the default-credentials path above.
		return nil, fmt.Errorf("scopes can only be used when kubectl is using a gcp service account key")
	}
	var cmdArgs []string
	if rawArgs, ok := gcpConfig["cmd-args"]; ok {
		cmdArgs = strings.Fields(rawArgs)
	} else {
		// Legacy form: the whole command line lives in "cmd-path".
		parts := strings.Fields(cmdPath)
		cmdPath, cmdArgs = parts[0], parts[1:]
	}
	return newCmdTokenSource(cmdPath, cmdArgs, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"]), nil
}
// parseScopes constructs the list of OAuth scopes for the token source from
// the config map: a missing "scopes" key selects the package defaults, an
// empty value selects no scopes at all, anything else is a comma-separated list.
func parseScopes(gcpConfig map[string]string) []string {
	raw, present := gcpConfig["scopes"]
	switch {
	case !present:
		return defaultScopes
	case raw == "":
		return []string{}
	default:
		return strings.Split(raw, ",")
	}
}
// WrapTransport returns a RoundTripper that injects OAuth2 bearer tokens and,
// on a 401 response, persists a reset (token-free) config so the next attempt
// fetches a fresh token.
func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
	resetCache := make(map[string]string)
	if cached, ok := g.tokenSource.(*cachedTokenSource); ok {
		resetCache = cached.baseCache()
	}
	oauthRT := &oauth2.Transport{Source: g.tokenSource, Base: rt}
	return &conditionalTransport{oauthRT, g.persister, resetCache}
}
// Login is a no-op: GCP credentials are acquired lazily by the token source.
func (g *gcpAuthProvider) Login() error { return nil }
// cachedTokenSource caches an access token in memory and, via the persister,
// in the kubeconfig, refreshing it from the wrapped source when it expires.
type cachedTokenSource struct {
	lk          sync.Mutex // guards accessToken and expiry
	source      oauth2.TokenSource
	accessToken string `datapolicy:"token"`
	expiry      time.Time
	persister   restclient.AuthProviderConfigPersister
	cache       map[string]string // base config values persisted alongside the token
}
// newCachedTokenSource seeds a cachedTokenSource with a previously persisted
// token/expiry pair (if parseable) and the base config cache.
func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) {
	// A malformed (or empty) expiry leaves the zero time, which marks the
	// cached token as unusable so it gets refreshed on first use.
	var expiresAt time.Time
	if parsed, err := time.Parse(time.RFC3339Nano, expiry); err == nil {
		expiresAt = parsed
	}
	if cache == nil {
		cache = map[string]string{}
	}
	cts := &cachedTokenSource{
		source:      ts,
		accessToken: accessToken,
		expiry:      expiresAt,
		persister:   persister,
		cache:       cache,
	}
	return cts, nil
}
// Token returns the cached token while it is valid, otherwise fetches a fresh
// one from the wrapped source and best-effort persists it.
func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
	// Serve from cache only while the token is valid AND carries a concrete expiry.
	if cached := t.cachedToken(); cached.Valid() && !cached.Expiry.IsZero() {
		return cached, nil
	}
	fresh, err := t.source.Token()
	if err != nil {
		return nil, err
	}
	updated := t.update(fresh)
	if t.persister != nil {
		// Persist failures are non-fatal: the in-memory cache still works.
		if persistErr := t.persister.Persist(updated); persistErr != nil {
			klog.V(4).Infof("Failed to persist token: %v", persistErr)
		}
	}
	return fresh, nil
}
// cachedToken snapshots the in-memory token under the lock.
func (t *cachedTokenSource) cachedToken() *oauth2.Token {
	t.lk.Lock()
	tok := &oauth2.Token{
		AccessToken: t.accessToken,
		TokenType:   "Bearer",
		Expiry:      t.expiry,
	}
	t.lk.Unlock()
	return tok
}
// update stores the new token in memory and returns a copy of the base config
// augmented with the token fields, suitable for persisting.
func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string {
	t.lk.Lock()
	defer t.lk.Unlock()
	t.accessToken = tok.AccessToken
	t.expiry = tok.Expiry
	snapshot := make(map[string]string, len(t.cache)+2)
	for key, value := range t.cache {
		snapshot[key] = value
	}
	snapshot["access-token"] = t.accessToken
	snapshot["expiry"] = t.expiry.Format(time.RFC3339Nano)
	return snapshot
}
// baseCache is the base configuration value for this TokenSource, without any
// cached ephemeral tokens (the "access-token" and "expiry" keys are dropped).
func (t *cachedTokenSource) baseCache() map[string]string {
	t.lk.Lock()
	defer t.lk.Unlock()
	base := make(map[string]string, len(t.cache))
	for key, value := range t.cache {
		if key == "access-token" || key == "expiry" {
			continue
		}
		base[key] = value
	}
	return base
}
// commandTokenSource produces tokens by running an external command and
// extracting fields from its JSON/YAML output via JSONPath expressions.
type commandTokenSource struct {
	cmd       string   // executable to run
	args      []string // arguments passed to cmd
	tokenKey  string   `datapolicy:"token"`
	expiryKey string   `datapolicy:"secret-key"`
	timeFmt   string // go reference-time layout used to parse the expiry
}
// newCmdTokenSource builds a commandTokenSource, filling in the documented
// defaults for any empty option.
func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt string) *commandTokenSource {
	src := &commandTokenSource{
		cmd:       cmd,
		args:      args,
		tokenKey:  tokenKey,
		expiryKey: expiryKey,
		timeFmt:   timeFmt,
	}
	if src.timeFmt == "" {
		src.timeFmt = time.RFC3339Nano
	}
	if src.tokenKey == "" {
		src.tokenKey = "{.access_token}"
	}
	if src.expiryKey == "" {
		src.expiryKey = "{.token_expiry}"
	}
	return src
}
// Token runs the configured command and parses an oauth2.Token from its output.
func (c *commandTokenSource) Token() (*oauth2.Token, error) {
	fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ")
	cmd := execCommand(c.cmd, c.args...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	output, err := cmd.Output()
	if err != nil {
		// Include stderr: tools like gcloud print the actionable error there.
		// (stderr.String() replaces the redundant string(stderr.Bytes()).)
		return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s stderr=%s", fullCmd, err, output, stderr.String())
	}
	token, err := c.parseTokenCmdOutput(output)
	if err != nil {
		return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err)
	}
	return token, nil
}
// parseTokenCmdOutput converts command output (YAML or JSON) to JSON, then
// extracts the access token and expiry via the configured JSONPath keys.
func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) {
	jsonBytes, err := yaml.ToJSON(output)
	if err != nil {
		return nil, err
	}
	var parsed interface{}
	if err := json.Unmarshal(jsonBytes, &parsed); err != nil {
		return nil, err
	}
	accessToken, err := parseJSONPath(parsed, "token-key", c.tokenKey)
	if err != nil {
		return nil, fmt.Errorf("error parsing token-key %q from %q: %v", c.tokenKey, string(jsonBytes), err)
	}
	expiryStr, err := parseJSONPath(parsed, "expiry-key", c.expiryKey)
	if err != nil {
		return nil, fmt.Errorf("error parsing expiry-key %q from %q: %v", c.expiryKey, string(jsonBytes), err)
	}
	// An unparsable expiry is tolerated; the zero time means "unknown".
	var expiry time.Time
	if parsedTime, err := time.Parse(c.timeFmt, expiryStr); err != nil {
		klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err)
	} else {
		expiry = parsedTime
	}
	return &oauth2.Token{
		AccessToken: accessToken,
		TokenType:   "Bearer",
		Expiry:      expiry,
	}, nil
}
// parseJSONPath evaluates the JSONPath template against input and returns the
// rendered string; name is only used to label the parser for error messages.
func parseJSONPath(input interface{}, name, template string) (string, error) {
	parser := jsonpath.New(name)
	if err := parser.Parse(template); err != nil {
		return "", err
	}
	var out bytes.Buffer
	if err := parser.Execute(&out, input); err != nil {
		return "", err
	}
	return out.String(), nil
}
// conditionalTransport injects OAuth2 credentials only when the request does
// not already carry an Authorization header, and persists resetCache when the
// server rejects the credentials with a 401.
type conditionalTransport struct {
	oauthTransport *oauth2.Transport
	persister      restclient.AuthProviderConfigPersister
	resetCache     map[string]string // config to persist when credentials are rejected
}

// Compile-time check that conditionalTransport exposes its wrapped RoundTripper.
var _ net.RoundTripperWrapper = &conditionalTransport{}
// RoundTrip forwards the request with an OAuth2 bearer token unless the caller
// already set Authorization. On a 401 it persists the reset cache so the next
// attempt fetches a fresh token.
func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if len(req.Header.Get("Authorization")) != 0 {
		// The caller supplied its own credentials; bypass the oauth transport.
		return t.oauthTransport.Base.RoundTrip(req)
	}
	res, err := t.oauthTransport.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode == http.StatusUnauthorized {
		klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster")
		// Best-effort cache reset; previously the Persist error was silently
		// dropped, now it is at least logged.
		if err := t.persister.Persist(t.resetCache); err != nil {
			klog.V(4).Infof("Failed to persist reset cache: %v", err)
		}
	}
	return res, nil
}
// WrappedRoundTripper returns the underlying transport, satisfying net.RoundTripperWrapper.
func (t *conditionalTransport) WrappedRoundTripper() http.RoundTripper { return t.oauthTransport.Base }
================================================
FILE: src/go/tests/relay/BUILD.bazel
================================================
load("@io_bazel_rules_go//go:def.bzl", "go_test")

# gazelle:go_test file

# In-process relay test: runs the relay server and client as goroutines inside
# the test binary, so no prebuilt binaries are needed.
go_test(
    name = "in_process_relay_test",
    size = "small",
    srcs = ["in_process_relay_test.go"],
    # https://github.com/googlecloudrobotics/core/issues/507
    flaky = True,
    deps = [
        "//src/go/cmd/http-relay-client/client:go_default_library",
        "//src/go/cmd/http-relay-server/server:go_default_library",
        "@com_github_golang_glog//:go_default_library",
    ],
)
# Out-of-process relay test: launches the prebuilt relay client/server
# binaries (listed in `data`) as subprocesses and exercises them over
# HTTP, websockets and gRPC.
go_test(
    name = "nok8s_relay_test",
    size = "small",
    srcs = ["nok8s_relay_test.go"],
    data = [
        "//src/go/cmd/http-relay-client:http-relay-client-app",
        "//src/go/cmd/http-relay-server:http-relay-server-app",
    ],
    # Run from the workspace root so the relative binary paths in the test resolve.
    rundir = ".",
    deps = [
        "//src/go/cmd/http-relay-client/client:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//interop/grpc_testing:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
        "@org_golang_x_net//websocket:go_default_library",
    ],
)
================================================
FILE: src/go/tests/relay/in_process_relay_test.go
================================================
package main
import (
"bytes"
"context"
"fmt"
"io"
"net"
"net/http"
"sync"
"testing"
"time"
"github.com/golang/glog"
"github.com/googlecloudrobotics/core/src/go/cmd/http-relay-client/client"
"github.com/googlecloudrobotics/core/src/go/cmd/http-relay-server/server"
)
var (
	relayPort   int // Will be initialized by initRelay()
	backendPort int // Will be initialized by initRelay()
	// blockSize is the relay server's transfer block size in bytes.
	blockSize = 10 * 1024
	// once guards initRelay so the relay is only started one time per test binary.
	once sync.Once
)
// pickUnusedPortOrDie asks the OS for a free TCP port on localhost and
// returns it; the listener is closed so the port is available (but may race
// with other processes). Aborts the test binary on failure.
func pickUnusedPortOrDie() int {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err == nil {
		listener, err := net.ListenTCP("tcp", addr)
		if err == nil {
			defer listener.Close()
			return listener.Addr().(*net.TCPAddr).Port
		}
	}
	glog.Fatal("Failed to pick a free TCP port.")
	return 0
}
// initRelay starts (once per test binary) an in-process relay server and
// relay client on OS-picked ports and blocks until the server's /healthz
// endpoint responds, or aborts after 5 seconds.
func initRelay() {
	once.Do(func() {
		glog.Info("Running init.")
		backendPort = pickUnusedPortOrDie()
		relayPort = pickUnusedPortOrDie()
		glog.Infof("Setting up relay.\n\tBackend port: %d\n\tRelay port: %d", backendPort, relayPort)
		go func() {
			relayServer := server.NewServer(server.Config{
				Port:      relayPort,
				BlockSize: blockSize,
			})
			relayServer.Start()
		}()
		go func() {
			config := client.DefaultClientConfig()
			config.RelayScheme = "http"
			config.RelayAddress = fmt.Sprint("127.0.0.1:", relayPort)
			config.BackendScheme = "http"
			config.BackendAddress = fmt.Sprint("127.0.0.1:", backendPort)
			config.DisableAuthForRemote = true
			relayClient := client.NewClient(config)
			relayClient.Start()
		}()
		relayHealthy := false
		deadline := time.Now().Add(5 * time.Second)
		for time.Now().Before(deadline) {
			relayHealthzAddr := fmt.Sprint("http://127.0.0.1:", relayPort, "/healthz")
			res, err := http.Get(relayHealthzAddr)
			if err != nil {
				// Fixed log message grammar ("is has not" -> "has not").
				glog.Infof("Relay server has not yet started, retrying.")
				time.Sleep(250 * time.Millisecond)
			} else {
				glog.Info("Relay server is up and running.")
				relayHealthy = true
				// Drain and close the body so the connection can be reused.
				defer res.Body.Close()
				io.ReadAll(res.Body)
				break
			}
		}
		if !relayHealthy {
			glog.Fatal("Failed to bring up http relay for unknown reason.")
		}
	})
}
// serveFunction starts a backend HTTP server on backendPort serving f, with a
// default handler timeout of 10 seconds.
func serveFunction(
	f func(w http.ResponseWriter, r *http.Request)) *http.Server {
	return serveFunctionWithTimeout(f, 10*time.Second)
}
// serveFunctionWithTimeout starts a backend HTTP server on backendPort
// serving f; requests exceeding handlerTimeout are answered with 503
// "Timeout" by http.TimeoutHandler. The server runs in a goroutine; callers
// should Shutdown() the returned server.
func serveFunctionWithTimeout(
	f func(w http.ResponseWriter, r *http.Request),
	handlerTimeout time.Duration) *http.Server {
	httpHandlerFunc := http.HandlerFunc(f)
	srv := &http.Server{
		Addr:         fmt.Sprint("127.0.0.1:", backendPort),
		ReadTimeout:  5 * time.Second, // Time between accepted connection and request body being read.
		WriteTimeout: 5 * time.Second, // Time between request header being read and response body being written.
		Handler:      http.TimeoutHandler(httpHandlerFunc, handlerTimeout, "Timeout"),
	}
	go func() {
		// ListenAndServe blocks until Shutdown; errors are intentionally ignored here.
		srv.ListenAndServe()
	}()
	return srv
}
// TestHttpResponse verifies that a GET through the relay returns the
// backend's response body unchanged.
func TestHttpResponse(t *testing.T) {
	initRelay()
	expectedResponse := []byte("Unit test response.")
	// Setup a backend function which just serves a string.
	httpServer := serveFunction(func(w http.ResponseWriter, r *http.Request) {
		w.Write(expectedResponse)
	})
	defer httpServer.Shutdown(context.Background())
	// Invoke the backend function through the relay.
	relayAddress := fmt.Sprint("http://127.0.0.1:", relayPort, "/client/server_name/")
	res, err := http.Get(relayAddress)
	if err != nil {
		// Fatalf (was t.Error with an unused %v verb): res is nil on error
		// and must not be dereferenced below.
		t.Fatalf("Server responded with an error: %v", err)
	}
	defer res.Body.Close()
	observedResponse, err := io.ReadAll(res.Body)
	if err != nil {
		// Previously the ReadAll error was silently ignored.
		t.Fatalf("Failed to read response body: %v", err)
	}
	if !bytes.Equal(observedResponse, expectedResponse) {
		t.Errorf("Received wrong response.\n\tExpected: %s\n\tObserved: %s", expectedResponse, observedResponse)
	}
}
// TestHttpTimeout verifies that a backend handler timeout surfaces through
// the relay as a 503.
func TestHttpTimeout(t *testing.T) {
	initRelay()
	// Setup a backend server which will create a timeout and result in a 503.
	httpServer := serveFunctionWithTimeout(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(2 * time.Second)
	}, 1*time.Second)
	defer httpServer.Shutdown(context.Background())
	// Hit the backend function through the relay and verify that the relay
	// forwards the 503 timeout.
	relayAddress := fmt.Sprint("http://127.0.0.1:", relayPort, "/client/server_name/")
	res, err := http.Get(relayAddress)
	if err != nil {
		t.Errorf("Server responded with an error. Error: %+v", err)
		return
	}
	// Close the response body (previously leaked).
	defer res.Body.Close()
	if res.StatusCode != http.StatusServiceUnavailable {
		t.Error("No timeout error received.")
	}
}
// TestHttpErrorPropagation verifies that backend HTTP status codes are passed
// through the relay unchanged.
func TestHttpErrorPropagation(t *testing.T) {
	initRelay()
	tests := []struct {
		name       string
		statusCode int
	}{
		{"Propagate http.StatusBadRequest", http.StatusBadRequest},                           // 400
		{"Propagate http.StatusUnauthorized", http.StatusUnauthorized},                       // 401
		{"Propagate http.StatusPaymentRequired", http.StatusPaymentRequired},                 // 402
		{"Propagate http.StatusForbidden", http.StatusForbidden},                             // 403
		{"Propagate http.StatusNotFound", http.StatusNotFound},                               // 404
		{"Propagate http.StatusMethodNotAllowed", http.StatusMethodNotAllowed},               // 405
		{"Propagate http.StatusInternalServerError", http.StatusInternalServerError},         // 500
		{"Propagate http.StatusNotImplemented", http.StatusNotImplemented},                   // 501
		{"Propagate http.StatusBadGateway", http.StatusBadGateway},                           // 502
		{"Propagate http.StatusServiceUnavailable", http.StatusServiceUnavailable},           // 503
		{"Propagate http.StatusGatewayTimeout", http.StatusGatewayTimeout},                   // 504
		{"Propagate http.StatusHTTPVersionNotSupported", http.StatusHTTPVersionNotSupported}, // 505
	}
	for _, test := range tests {
		// Invoke a sub-test
		t.Run(test.name, func(t *testing.T) {
			// Setup a backend function which just serves an error code.
			httpServer := serveFunction(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(test.statusCode)
			})
			defer httpServer.Shutdown(context.Background())
			// Invoke the backend function through the relay.
			relayAddress := fmt.Sprint("http://127.0.0.1:", relayPort, "/client/server_name/")
			res, err := http.Get(relayAddress)
			if err != nil {
				// Fatalf (typo "responeded" fixed): res is nil on error and
				// must not be dereferenced below.
				t.Fatalf("Server responded with an error: %v", err)
			}
			defer res.Body.Close()
			if res.StatusCode != test.statusCode {
				t.Errorf("Server responded with an unexpected status code.\n\tExpected: %v\n\tObserved: %v", test.statusCode, res.StatusCode)
			}
		})
	}
}
================================================
FILE: src/go/tests/relay/nok8s_relay_test.go
================================================
// Copyright 2022 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"log/slog"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"regexp"
"strings"
"testing"
"time"
"github.com/googlecloudrobotics/core/src/go/cmd/http-relay-client/client"
"github.com/pkg/errors"
"golang.org/x/net/websocket"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
testpb "google.golang.org/grpc/interop/grpc_testing"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// Paths to the prebuilt relay binaries supplied via the go_test `data`
// attribute, relative to the test's run directory.
const (
	RelayClientPath = "src/go/cmd/http-relay-client/http-relay-client-app_/http-relay-client-app"
	RelayServerPath = "src/go/cmd/http-relay-server/http-relay-server-app_/http-relay-server-app"
)

var (
	// Common flags for the relay client under test.
	RelayClientArgs = []string{
		"--backend_scheme=http",
		"--relay_scheme=http",
		"--server_name=remote1",
		"--disable_auth_for_remote",
	}
	// --port=0 lets the relay server pick a free port; the chosen port is
	// scraped from the server's log output with rsPortMatcher.
	RelayServerArgs = []string{
		"--port=0",
	}
	rsPortMatcher = regexp.MustCompile(`Relay server listening.*"Port":(\d+)`)
)
// relay tracks the relay-server (rs) and relay-client (rc) subprocesses and
// the port the server chose at startup.
type relay struct {
	rs, rc *exec.Cmd
	rsPort string
}
// start brings up the relay processes: a relay server on an OS-picked port
// and a relay client connecting the given backend to it. It scrapes the
// server's stderr for the chosen port and waits for the client to connect.
func (r *relay) start(backendAddress string, extraClientArgs ...string) error {
	// run relay server exposing the relay client
	var rsOut bytes.Buffer
	r.rs = exec.Command(RelayServerPath, RelayServerArgs...)
	r.rs.Stdout = os.Stdout
	// Tee stderr so we can both watch the logs and scrape the port/connect markers.
	r.rs.Stderr = io.MultiWriter(os.Stderr, &rsOut)
	if err := r.rs.Start(); err != nil {
		return errors.Wrap(err, "failed to start relay-server")
	}
	r.rsPort = ""
	for i := 0; i < 10; i++ {
		slog.Info("Output", slog.String("Output", rsOut.String()))
		if m := rsPortMatcher.FindStringSubmatch(rsOut.String()); m != nil {
			r.rsPort = m[1]
			slog.Info("Server port", slog.String("Port", r.rsPort))
			break
		}
		slog.Info("Waiting for relay to be up-and-running ...")
		time.Sleep(1 * time.Second)
	}
	if r.rsPort == "" {
		return errors.New("timeout waiting for relay-server to launch")
	}
	// run relay client exposing the test-backend
	rcArgs := append(RelayClientArgs, []string{
		"--backend_address=" + backendAddress,
		"--relay_address=127.0.0.1:" + r.rsPort,
	}...)
	rcArgs = append(rcArgs, extraClientArgs...)
	slog.Info("Starting backend", slog.String("Address", backendAddress))
	r.rc = exec.Command(RelayClientPath, rcArgs...)
	r.rc.Stdout = os.Stdout
	r.rc.Stderr = os.Stderr
	if err := r.rc.Start(); err != nil {
		return errors.Wrap(err, "failed to start relay-client")
	}
	connected := false
	for i := 0; i < 10; i++ {
		if strings.Contains(rsOut.String(), "Relay client connected") {
			connected = true
			break
		}
		slog.Info("Waiting for relay to be up-and-running ...")
		time.Sleep(1 * time.Second)
	}
	if !connected {
		// BUG FIX: the error was previously constructed but never returned,
		// silently ignoring a connect timeout.
		return errors.New("timeout waiting for relay-client to connect to relay-server")
	}
	return nil
}
// stop tears down the relay processes. Both processes are always killed, even
// if the first kill fails (previously a server kill failure leaked the client
// process); the first error encountered is returned.
func (r *relay) stop() error {
	errServer := r.rs.Process.Kill()
	errClient := r.rc.Process.Kill()
	if errServer != nil {
		return errors.Wrap(errServer, "failed to kill relay-server")
	}
	if errClient != nil {
		return errors.Wrap(errClient, "failed to kill relay-client")
	}
	return nil
}
// TestHttpRelay launches a local http relay (client + server) and connects a
// test-http-server as a backend. The test is then interacting with the backend
// through the local relay.
func TestHttpRelay(t *testing.T) {
	tests := []struct {
		desc       string
		urlPath    string
		statusCode int
		body       string // expected substring of the response body
	}{
		{
			desc:       "simple get",
			urlPath:    "/client/remote1/",
			statusCode: http.StatusOK,
			body:       "Hello",
		},
		{
			desc:       "backend status is preserved",
			urlPath:    "/client/remote1/bad-path",
			statusCode: http.StatusNotFound,
			body:       "",
		},
		{
			// An unknown server name cannot be routed by the relay server.
			desc:       "invalid client",
			urlPath:    "/client/wrong/",
			statusCode: http.StatusServiceUnavailable,
			body:       "",
		},
	}
	// setup http test server: "Hello" at the root, 404 elsewhere
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" {
			fmt.Fprintln(w, "Hello")
			return
		}
		w.WriteHeader(http.StatusNotFound)
	}))
	defer ts.Close()
	backendAddress := strings.TrimPrefix(ts.URL, "http://")
	r := &relay{}
	if err := r.start(backendAddress); err != nil {
		t.Fatal("failed to start relay: ", err)
	}
	defer func() {
		if err := r.stop(); err != nil {
			t.Fatal("failed to stop relay: ", err)
		}
	}()
	relayAddress := "http://127.0.0.1:" + r.rsPort
	for _, tc := range tests {
		t.Run(tc.desc, func(t *testing.T) {
			res, err := http.Get(relayAddress + tc.urlPath)
			if err != nil {
				t.Fatal(err)
			}
			body, err := io.ReadAll(res.Body)
			res.Body.Close()
			if err != nil {
				t.Fatal(err)
			}
			if res.StatusCode != tc.statusCode {
				t.Errorf("Wrong status code - got %d, expected %d", res.StatusCode, tc.statusCode)
			}
			if !strings.Contains(string(body), tc.body) {
				t.Errorf("Wrong body - got %q, expected it to contain %q, ", body, tc.body)
			}
		})
	}
}
// TestDroppedUserClientFreesRelayChannel checks that when the user client closes a connection,
// it is propagated to the relay server and client, closing the backend connection as well.
func TestDroppedUserClientFreesRelayChannel(t *testing.T) {
	// setup http test server
	// connClosed receives the write error once the relay drops the backend connection.
	connClosed := make(chan error)
	defer close(connClosed)
	finishServer := make(chan bool)
	defer close(finishServer)
	// mock a long running backend that uses chunking to send periodic updates
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		for {
			select {
			case <-finishServer:
				return
			default:
				// A write error here means the connection was torn down, which
				// is exactly what the test waits for.
				if _, err := fmt.Fprintln(w, "DEADBEEF"); err != nil {
					connClosed <- err
					return
				}
				// Flush each line so the client sees periodic chunks.
				if flusher, ok := w.(http.Flusher); ok {
					flusher.Flush()
				} else {
					t.Fatal("cannot flush")
				}
				time.Sleep(time.Second)
			}
		}
	}))
	defer ts.Close()
	backendAddress := strings.TrimPrefix(ts.URL, "http://")
	r := &relay{}
	if err := r.start(backendAddress); err != nil {
		t.Fatal("failed to start relay: ", err)
	}
	defer r.stop()
	relayAddress := "http://127.0.0.1:" + r.rsPort
	res, err := http.Get(relayAddress + "/client/remote1/")
	if err != nil {
		t.Fatal(err)
	}
	// receive the first chunk then terminates the connection
	if _, err := bufio.NewReader(res.Body).ReadString('\n'); err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	// wait for up to 30s for backend connection to be closed
	select {
	case <-connClosed:
	case <-time.After(30 * time.Second):
		t.Error("Server did not close connection")
	}
}
// TestDroppedBidiStreamFreesRelayChannel checks that when a bidi stream (websockets) closes,
// it is propagated to the relay server and client, closing the backend connection as well.
func TestDroppedBidiStreamFreesRelayChannel(t *testing.T) {
	// setup http test server
	// done receives nil on a clean EOF (connection closed) or the error otherwise.
	done := make(chan error)
	defer close(done)
	// test websockets server that echo received messages
	ts := httptest.NewServer(websocket.Handler(func(conn *websocket.Conn) {
		r := bufio.NewReader(conn)
		for {
			req, err := r.ReadBytes('\n')
			if err != nil {
				// EOF means the relay closed the backend connection, which is
				// the condition this test waits for.
				if err == io.EOF {
					done <- nil
				} else {
					done <- err
				}
			}
			if _, err := conn.Write(req); err != nil {
				if err == io.EOF {
					done <- nil
				} else {
					done <- err
				}
			}
		}
	}))
	defer func() {
		ts.CloseClientConnections()
		ts.Close()
	}()
	backendAddress := strings.TrimPrefix(ts.URL, "http://")
	r := &relay{}
	if err := r.start(backendAddress); err != nil {
		t.Fatal("failed to start relay: ", err)
	}
	defer r.stop()
	relayAddress := "ws://127.0.0.1:" + r.rsPort
	clientConn, err := websocket.Dial(relayAddress+"/client/remote1/", "", "http://127.0.0.1")
	if err != nil {
		t.Fatal(err)
	}
	// receive the first message then terminates the connection
	if _, err := clientConn.Write([]byte("hello\n")); err != nil {
		t.Fatal(err)
	}
	if _, err := bufio.NewReader(clientConn).ReadString('\n'); err != nil {
		t.Fatal(err)
	}
	clientConn.Close()
	// wait for up to 30s for backend connection to be closed
	select {
	case err = <-done:
		if err != nil {
			t.Error(err)
		}
	case <-time.After(30 * time.Second):
		t.Error("Server did not close connection")
	}
}
// testServer is a minimal gRPC TestService implementation used as the relay backend.
type testServer struct {
	testpb.UnimplementedTestServiceServer
	responsePayload []byte // payload echoed back by UnaryCall
}
// EmptyCall returns an empty response, always succeeding.
func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	return &testpb.Empty{}, nil
}
// UnaryCall returns the server's configured responsePayload, regardless of
// the request contents.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return &testpb.SimpleResponse{
		Payload: &testpb.Payload{
			Body: s.responsePayload,
		},
	}, nil
}
// relayWithGrpcServer bundles everything mustStartRelayWithGrpcServer brings
// up, so tests can tear it all down with mustStop.
type relayWithGrpcServer struct {
	Listener   net.Listener
	GrpcServer *grpc.Server
	Relay      *relay
	Conn       *grpc.ClientConn
	Ctx        context.Context // carries the x-server-name routing metadata
}
// mustStop tears down the gRPC backend, the relay processes, and the client
// connection; it fails the test if the relay cannot be stopped.
func (r *relayWithGrpcServer) mustStop(t *testing.T) {
	r.Listener.Close()
	r.GrpcServer.Stop()
	if err := r.Relay.stop(); err != nil {
		t.Fatal("failed to stop relay: ", err)
	}
	r.Conn.Close()
}
// mustStartRelayWithGrpcServer starts the given gRPC service on a free port,
// brings up the relay (with HTTP/2 forced) in front of it, and returns a
// TestService client that talks to the service through the relay. Fails the
// test on any setup error.
func mustStartRelayWithGrpcServer(t *testing.T, service testpb.TestServiceServer) (testpb.TestServiceClient, *relayWithGrpcServer) {
	t.Helper()
	result := &relayWithGrpcServer{}
	var err error
	// Setup gRPC test server.
	result.Listener, err = net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("failed to listen: %v", err)
	}
	result.GrpcServer = grpc.NewServer()
	testpb.RegisterTestServiceServer(result.GrpcServer, service)
	go result.GrpcServer.Serve(result.Listener)
	// Start relay client and server.
	backendAddress := fmt.Sprintf("127.0.0.1:%d", result.Listener.Addr().(*net.TCPAddr).Port)
	result.Relay = &relay{}
	if err := result.Relay.start(backendAddress, "--force_http2"); err != nil {
		t.Fatal("failed to start relay: ", err)
	}
	relayAddress := "127.0.0.1:" + result.Relay.rsPort
	// Create gRPC client via relay. The x-server-name metadata routes requests
	// to the "remote1" relay client.
	// NOTE(review): grpc.DialContext/WithInsecure are deprecated in newer
	// grpc-go; consider grpc.NewClient with insecure credentials when upgrading.
	result.Ctx = metadata.AppendToOutgoingContext(context.Background(), "x-server-name", "remote1")
	result.Conn, err = grpc.DialContext(result.Ctx, relayAddress, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("Failed to create client connection: %v", err)
	}
	return testpb.NewTestServiceClient(result.Conn), result
}
// TestGrpcRelaySimpleCallWorks launches a local http relay (client + server),
// connects a grpc service as backend and issues a simple call, expecting success.
func TestGrpcRelaySimpleCallWorks(t *testing.T) {
	client, relayAndServer := mustStartRelayWithGrpcServer(t, &testServer{})
	defer relayAndServer.mustStop(t)
	// Any non-nil error fails the test. (Previously a non-status error, where
	// status.FromError reports !ok, was silently ignored.)
	if _, err := client.EmptyCall(relayAndServer.Ctx, &testpb.Empty{}); err != nil {
		t.Errorf("EmptyCall failed with code %d: %v", status.Code(err), err)
	}
}
// TestGrpcRelayChunkingOfLargeResponseWorks launches a local http relay
// (client + server), connects a grpc service as backend and issues a call
// with a response that has to be chunked.
func TestGrpcRelayChunkingOfLargeResponseWorks(t *testing.T) {
	// Make responses from gRPC server larger than the default MaxChunkSize.
	payload := make([]byte, client.DefaultClientConfig().MaxChunkSize*5)
	for i := 0; i < len(payload); i++ {
		payload[i] = byte(i) // Fill with non-zeroes
	}
	testServer := &testServer{responsePayload: payload}
	client, relayAndServer := mustStartRelayWithGrpcServer(t, testServer)
	defer relayAndServer.mustStop(t)
	response, err := client.UnaryCall(relayAndServer.Ctx, &testpb.SimpleRequest{})
	if err != nil {
		// Fatalf on ANY error: previously a non-status error slipped past the
		// nested check and caused a nil dereference of response below.
		t.Fatalf("UnaryCall failed with code %d: %v", status.Code(err), err)
	}
	if !bytes.Equal(response.Payload.Body, testServer.responsePayload) {
		t.Errorf("Received payload not equal to payload returned by server.")
	}
}
// TestGrpcRelayErrorArePropagated launches a local http relay (client + server),
// connects a grpc service as backend and verifies that the backend's
// Unimplemented status is propagated through the relay.
func TestGrpcRelayErrorArePropagated(t *testing.T) {
	// UnimplementedTestServiceServer returns Unimplemented for all RPCs.
	client, relayAndServer := mustStartRelayWithGrpcServer(t, &testpb.UnimplementedTestServiceServer{})
	defer relayAndServer.mustStop(t)
	_, err := client.EmptyCall(relayAndServer.Ctx, &testpb.Empty{})
	if err == nil {
		// Previously a nil error passed the test, so a relay that swallowed
		// backend errors went undetected.
		t.Error("EmptyCall unexpectedly succeeded, expected codes.Unimplemented")
	} else if code := status.Code(err); code != codes.Unimplemented {
		t.Errorf("Wrong error code: got %d, expected %d", code, codes.Unimplemented)
	}
}
================================================
FILE: src/go/tests/relay-bench.sh
================================================
#!/bin/bash
# Runs a local http server, a local relay server + client and compares direct
# access to relayed access.

# On Ctrl-C restore the CPU governor and stop any started processes.
trap suite_cleanup INT

# Endpoints: the backend serves the test payloads; the relay fronts it.
backend_server_port=8082
backend_server="http://localhost:${backend_server_port}"
relay_server_port=8081
relay_server="http://localhost:${relay_server_port}"
server_name="test"
# per suite
# suite_init pins the CPU governor for stable timings, builds the relay
# binaries, and checks that the go-httpbin backend is installed.
function suite_init() {
  sudo cpufreq-set -g performance
  bazel build //src/go/cmd/http-relay-client:http-relay-client-bin //src/go/cmd/http-relay-server:http-relay-server-bin
  which >/dev/null go-httpbin || die "Please run: go install github.com/mccutchen/go-httpbin/v2/cmd/go-httpbin"
}

# suite_cleanup restores the default CPU governor and stops test processes.
function suite_cleanup() {
  sudo cpufreq-set -g ondemand
  test_cleanup
}
# per test
# test_init starts the backend server, the relay server, and the relay client
# (extra args are passed to the client), recording their PIDs for cleanup.
function test_init() {
  go-httpbin >/tmp/backend_server.log 2>&1 \
    -host 127.0.0.1 -port 8082 &
  backend_server_pid=$!
  bazel-bin/src/go/cmd/http-relay-server/http-relay-server-bin_/http-relay-server-bin >/tmp/relay_server.log 2>&1 \
    --port=${relay_server_port} &
  relay_server_pid=$!
  # ensure server is up
  sleep 1s
  bazel-bin/src/go/cmd/http-relay-client/http-relay-client-bin_/http-relay-client-bin >/tmp/relay_client.log 2>&1 \
    --backend_address=localhost:${backend_server_port} --backend_scheme=http --relay_scheme=http --server_name="${server_name}" "$@" &
  relay_client_pid=$!
}

# test_cleanup kills whichever test processes are still recorded and unsets
# their PID variables so repeated calls are safe.
function test_cleanup() {
  test -n "$relay_client_pid" && kill $relay_client_pid && unset relay_client_pid
  test -n "$relay_server_pid" && kill $relay_server_pid && unset relay_server_pid
  test -n "$backend_server_pid" && kill $backend_server_pid && unset backend_server_pid
}
# helper
# die prints an error to stderr and aborts the script.
function die {
  echo "$1" >&2
  exit 1
}

# get_avg averages column 7 (time_total from curltime) of a CSV file.
function get_avg() {
  awk -F',' '{sum+=$7} END {print sum/NR}' $1
}

# status prints one result line: $1=label $2=direct-avg $3=relay-avg.
function status() {
  printf "%s: direct=%9.7fs, relay=%9.7fs, slowdown=%9.7fs\n" $1 $2 $3 $(echo $3/$2 | bc -l)
}

# curltime fetches a URL and emits curl's timing phases as one CSV line.
function curltime() {
  curl -w @- -o /dev/null -s "$@" <<'EOF'
%{time_namelookup},%{time_connect},%{time_appconnect},%{time_pretransfer},%{time_redirect},%{time_starttransfer},%{time_total}\n
EOF
}
# benchmarks
# run_test runs one benchmark: $1=name, $2=num_runs, $3=request path, any
# remaining args are forwarded to the relay client. It measures sequential and
# parallel request latency, direct vs. through the relay.
function run_test() {
  test_name="$1"
  shift
  echo "==== $test_name : $@"
  local num_runs
  num_runs=$1
  shift
  local req_path
  req_path="$1"
  shift
  test_init "$@"
  # Sequential requests, direct then relayed.
  (for i in $(seq $num_runs); do curltime ${backend_server}/${req_path}; done) >/tmp/direct.seq.csv
  direct=$(get_avg /tmp/direct.seq.csv)
  (for i in $(seq $num_runs); do curltime ${relay_server}/${req_path} -H"X-Server-Name: ${server_name}"; done) >/tmp/relay.seq.csv
  relay=$(get_avg /tmp/relay.seq.csv)
  status "seq" ${direct} ${relay}
  # Parallel requests; the sleep lets backgrounded curls drain before averaging.
  (for i in $(seq $num_runs); do curltime ${backend_server}/${req_path} & done) >/tmp/direct.par.csv;
  sleep 1s
  direct=$(get_avg /tmp/direct.par.csv)
  (for i in $(seq $num_runs); do curltime ${relay_server}/${req_path} -H"X-Server-Name: ${server_name}" & done) >/tmp/relay.par.csv;
  sleep 1s
  relay=$(get_avg /tmp/relay.par.csv)
  status "par" ${direct} ${relay}
  test_cleanup
}
# run executes the full benchmark suite with a few relay-client configurations.
function run() {
  suite_init
  local num_runs
  num_runs=100
  run_test "  default_params" $num_runs "bytes/100000"
  run_test "   more_requests" $num_runs "bytes/100000" --max_idle_conns_per_host=100 --num_pending_requests=10
  run_test "   more_requests" $num_runs "bytes/100000" --max_idle_conns_per_host=100 --num_pending_requests=50
  suite_cleanup
}

# With no arguments run the whole suite; otherwise treat the arguments as a
# command to run (useful for invoking a single function, e.g. `./relay-bench.sh run_test ...`).
if [[ -z "$1" ]]; then
  run
else
  # call arguments verbatim:
  "$@"
fi
================================================
FILE: src/go/tests/relay_test.sh
================================================
#!/usr/bin/env bash
#
# Copyright 2019 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This test only works in conjunction with a sim vm. E.g. from the top of the
# repo run: (one time)
#
# ./scripts/robot-sim.sh create "${PROJECT:?}" "sim1"
#
# Then to deploy and test the relay:
#
# ./deploy.sh fast_push "${PROJECT:?}"
# sleep 30 # allow time for http-relay-server/client to update
# bazel test --test_env GCP_PROJECT_ID="${PROJECT:?}" --test_env CLUSTER="sim1" --test_env HOME="${HOME}" --test_output=streamed --test_tag_filters="external" //src/go/tests:relay_test
#
# Instead of `sleep 30` you can watch:
#
# kubectl --context gke_${PROJECT:?}_${ZONE:?}_cloud-robotics -n app-k8s-relay get pods -w
# kubectl --context gke_${PROJECT:?}_${ZONE:?}_sim1 -n app-k8s-relay get pods -w
#
# Add -v7 to kc exec in the tests to get more details when debugging.
CLUSTER="${CLUSTER:-test-robot}"
TEST_POD_NAME="busybox-sleep"
# Use a throwaway kubeconfig and cache dir so the test never touches the
# user's real kubectl configuration.
KC_DIR=$(mktemp -d -t kc-XXXXXXXXXX)
export KUBECONFIG="${KC_DIR}/test"
touch "${KUBECONFIG}"
export KUBECACHE="${KC_DIR}/cache"
mkdir -p "${KUBECACHE}"
# gcloud expects to be able to write to its config directly.
CLOUDSDK_CONFIG=$(mktemp -d -t gcloud-XXXXXXXXXX)
export CLOUDSDK_CONFIG
cp -a ~/.config/gcloud/* "${CLOUDSDK_CONFIG}"
# kubectl wrapper: runs against the relayed test cluster using the sandboxed
# discovery cache set up above. All arguments are forwarded unchanged.
function kc() {
  kubectl --context="${CLUSTER}" --cache-dir="${KUBECACHE}" "$@"
}
# Point the sandboxed kubeconfig at the cloud relay endpoint for ${CLUSTER},
# verify the relay is reachable, and start a long-sleeping busybox pod on the
# robot for the tests to exec into. Exits via test_failed if the relay check
# fails. Requires GCP_PROJECT_ID in the environment.
function setup() {
# configure kubectl to use relay for robot-sim vm (test-robot)
kubectl config set-credentials "${GCP_PROJECT_ID}" --exec-command=gke-gcloud-auth-plugin --exec-api-version=client.authentication.k8s.io/v1beta1
# The auth plugin needs cluster info to mint tokens, but kubectl has no flag
# for that kubeconfig field, so patch the freshly written file in place.
sed -i "s/provideClusterInfo: false/provideClusterInfo: true/" "${KUBECONFIG}"
# Route API traffic through the kubernetes-relay endpoint instead of a direct
# cluster address.
kubectl config set-cluster "${CLUSTER}" --server="https://www.endpoints.${GCP_PROJECT_ID}.cloud.goog/apis/core.kubernetes-relay/client/${CLUSTER}"
kubectl config set-context "${CLUSTER}" --cluster "${CLUSTER}" --namespace "default" --user "$GCP_PROJECT_ID"
echo "Checking relay is working..."
kubectl --context "${CLUSTER}" version || test_failed "during setup, failed to reach the robot-sim VM"
# delete test pod (if running)
if kc get pod "${TEST_POD_NAME}" -o name 2>/dev/null; then
kc delete pod --ignore-not-found "${TEST_POD_NAME}"
kc wait --for=delete pod/"${TEST_POD_NAME}" --timeout=60s
fi
# deploy a container with a shell that runs sleep
# The trap/wait pattern lets the pod respond to TERM promptly (a bare `sleep`
# would ignore the signal until the kill grace period expires).
kc run "${TEST_POD_NAME}" --image=gcr.io/google-containers/busybox:1.27.2 --restart=Never -- /bin/sh -c "trap : TERM INT; sleep 3600 & wait"
kc wait --for=condition=Ready pod/"${TEST_POD_NAME}"
}
# Remove the test pod and the temporary kubectl/gcloud config directories.
# Runs as an EXIT trap, so every step tolerates failure.
function teardown() {
  # delete test pod (if running)
  kc delete pod --ignore-not-found "${TEST_POD_NAME}" || true
  # BUG FIX: this previously removed "${KC_CFG_DIR}", a variable that is never
  # set anywhere in this script; the kubeconfig tempdir actually lives in
  # KC_DIR (see its mktemp call near the top), so it leaked on every run.
  rm -rf "${KC_DIR}" "${CLOUDSDK_CONFIG}"
}
# Report a failed check and abort the whole test script with status 1.
# $1: human-readable description of what failed.
function test_failed() {
  local reason="$1"
  echo "TEST FAILED: ${reason}"
  exit 1
}
# Report a successful check; informational only, never affects exit status.
# $1: human-readable description of what passed.
function test_passed() {
  local what="$1"
  echo "TEST PASSED: ${what}"
}
# End-to-end check that `kubectl exec` works through the relay: run a trivial
# command in the test pod and compare its output against the expected string.
function test_relay_can_exec_to_shell() {
  local got
  got=$(kc exec "${TEST_POD_NAME}" -- echo hello)
  if [[ "${got}" != "hello" ]]; then
    test_failed "echo command did not run, output was \"${got}\", want \"hello\""
  fi
  test_passed "echo command worked"
}
# Check that the relay forwards stdin and propagates EOF: pipe a command into
# a remote shell; if EOF were swallowed, the remote `sh` would hang instead of
# exiting after the piped input is consumed.
function test_relay_handles_eof() {
  local got
  got=$(printf 'echo foo\n' | kc exec "${TEST_POD_NAME}" -i -- sh)
  if [[ "${got}" != "foo" ]]; then
    test_failed "echo command did not run, output was \"${got}\""
  fi
  test_passed "echo command worked"
}
# BUG FIX: register the cleanup trap BEFORE running setup. Previously the trap
# was installed after setup, so when setup aborted (test_failed exits 1, e.g.
# the relay check or `kc wait` failing) the test pod and the kubeconfig/gcloud
# tempdirs were never cleaned up.
trap teardown EXIT
setup
test_relay_can_exec_to_shell
test_relay_handles_eof
================================================
FILE: src/go.mod
================================================
module github.com/googlecloudrobotics/core/src
go 1.25.0
toolchain go1.25.4
require (
cloud.google.com/go v0.123.0 // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.2
contrib.go.opencensus.io/exporter/stackdriver v0.13.14
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/fsnotify/fsnotify v1.9.0
github.com/getlantern/httptest v0.0.0-20161025015934-4b40f4c7e590
github.com/getlantern/mockconn v0.0.0-20190403061815-a8ffa60494a6 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/golang/mock v1.7.0-rc.1
github.com/golang/protobuf v1.5.4 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/motemen/go-loghttp v0.0.0-20170804080138-974ac5ceac27
github.com/motemen/go-nuts v0.0.0-20220604134737-2658d0104f31 // indirect
github.com/onsi/gomega v1.27.10
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/procfs v0.12.0 // indirect
github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.10
go.opencensus.io v0.24.0
golang.org/x/crypto v0.46.0
golang.org/x/net v0.48.0
golang.org/x/sync v0.19.0
golang.org/x/sys v0.42.0 // indirect
google.golang.org/api v0.256.0
google.golang.org/grpc v1.79.3
google.golang.org/protobuf v1.36.11
gopkg.in/h2non/gock.v1 v1.1.2
k8s.io/api v0.28.4
k8s.io/apiextensions-apiserver v0.28.4
k8s.io/apimachinery v0.28.4
k8s.io/cli-runtime v0.28.4
k8s.io/client-go v0.28.4
k8s.io/helm v2.17.0+incompatible
k8s.io/klog v1.0.0
sigs.k8s.io/controller-runtime v0.16.3
sigs.k8s.io/kind v0.17.0
sigs.k8s.io/yaml v1.6.0
)
require (
cloud.google.com/go/storage v1.59.2
github.com/aws/aws-sdk-go v1.45.25 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/prometheus/statsd_exporter v0.22.8 // indirect
golang.org/x/oauth2 v0.34.0
google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 // indirect
)
require (
github.com/form3tech-oss/jwt-go v3.2.5+incompatible
github.com/golang/glog v1.2.5
github.com/google/go-cmp v0.7.0
github.com/google/nftables v0.3.0
github.com/googlecloudrobotics/ilog v0.0.0-20240112131211-2efd642f756e
github.com/jaypipes/ghw v0.17.0
k8s.io/klog/v2 v2.110.1
)
require (
cel.dev/expr v0.25.1 // indirect
cloud.google.com/go/auth v0.17.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.1.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.43.0 // indirect
go.opentelemetry.io/otel/metric v1.43.0 // indirect
go.opentelemetry.io/otel/sdk v1.43.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect
go.opentelemetry.io/otel/trace v1.43.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
)
require (
cloud.google.com/go/compute/metadata v0.9.0
cloud.google.com/go/iam v1.5.3 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
cloud.google.com/go/trace v1.11.6 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/alessio/shellescape v1.4.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/zapr v1.2.4 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/safetext v0.0.0-20221026122733-23539d61753f // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jaypipes/pcidb v1.0.1
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/prometheus v0.48.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.14.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
gopkg.in/evanphx/json-patch.v5 v5.6.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v1.0.0 // indirect
k8s.io/component-base v0.28.4 // indirect
k8s.io/kube-openapi v0.0.0-20231129212854-f0671cc7e66a // indirect
k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.15.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.15.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
================================================
FILE: src/go.sum
================================================
cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE=
cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU=
cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E=
cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY=
cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.59.2 h1:gmOAuG1opU8YvycMNpP+DvHfT9BfzzK5Cy+arP+Nocw=
cloud.google.com/go/storage v1.59.2/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI=
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
contrib.go.opencensus.io/exporter/stackdriver v0.13.14 h1:zBakwHardp9Jcb8sQHcHpXy/0+JIb1M8KjigCJzx7+4=
contrib.go.opencensus.io/exporter/stackdriver v0.13.14/go.mod h1:5pSSGY0Bhuk7waTHuDf4aQ8D2DrhgETRo9fy6k3Xlzc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0/go.mod h1:l9rva3ApbBpEJxSNYnwT9N4CDLrWgtq3u8736C5hyJw=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0 h1:xfK3bbi6F2RDtaZFtUdKO3osOBIhNb+xTs8lFW6yx9o=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 h1:s0WlVbf9qpvkh1c/uDAPElam0WrL7fHRIidgZJ7UqZI=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4=
github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA=
github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=
github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8=
github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/getlantern/httptest v0.0.0-20161025015934-4b40f4c7e590 h1:OhyiFx+yBN30O3IHrIq+9LAEhy6o7fin21wUQxF8NiE=
github.com/getlantern/httptest v0.0.0-20161025015934-4b40f4c7e590/go.mod h1:rE/jidqqHHG9sjSxC24Gd5YCfZ1AT91C2wjJ28TAOfA=
github.com/getlantern/mockconn v0.0.0-20190403061815-a8ffa60494a6 h1:+aO65ByJw74kV8vXqvkj49P5RtIqyUObyeRTIxMz218=
github.com/getlantern/mockconn v0.0.0-20190403061815-a8ffa60494a6/go.mod h1:+F5GJ7qGpQ03DBtcOEyQpM30ix4BLswdaojecFtsdy8=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA=
github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U=
github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/nftables v0.3.0 h1:bkyZ0cbpVeMHXOrtlFc8ISmfVqq5gPJukoYieyVmITg=
github.com/google/nftables v0.3.0/go.mod h1:BCp9FsrbF1Fn/Yu6CLUc9GGZFw/+hsxfluNXXmxBfRM=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
github.com/google/safetext v0.0.0-20221026122733-23539d61753f h1:03r+JaAB8/2z83KOOCZK95tslx6e41NZS4Tpt569MtY=
github.com/google/safetext v0.0.0-20221026122733-23539d61753f/go.mod h1:mJNEy0r5YPHC7ChQffpOszlGB4L1iqjXWpIEKcFpr9s=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
github.com/googlecloudrobotics/ilog v0.0.0-20240112131211-2efd642f756e h1:lfnmC6SUHV/5QrqXElmZ0WgojfIccKVNtxDry4T3AS8=
github.com/googlecloudrobotics/ilog v0.0.0-20240112131211-2efd642f756e/go.mod h1:t9Up/i5bPfkBc7lEE+p0+lcD0NDw2zTTr19x19D7720=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jaypipes/ghw v0.17.0 h1:EVLJeNcy5z6GK/Lqby0EhBpynZo+ayl8iJWY0kbEUJA=
github.com/jaypipes/ghw v0.17.0/go.mod h1:In8SsaDqlb1oTyrbmTC14uy+fbBMvp+xdqX51MidlD8=
github.com/jaypipes/pcidb v1.0.1 h1:WB2zh27T3nwg8AE8ei81sNRb9yWBii3JGNJtT7K9Oic=
github.com/jaypipes/pcidb v1.0.1/go.mod h1:6xYUz/yYEyOkIkUt2t2J2folIuZ4Yg6uByCGFXMCeE4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/motemen/go-loghttp v0.0.0-20170804080138-974ac5ceac27 h1:uAI3rnOT1OSSY4PUtI/M1orb3q0ewkovwd3wr8xSno4=
github.com/motemen/go-loghttp v0.0.0-20170804080138-974ac5ceac27/go.mod h1:6eu9CfGt5kfrMVgeu9MfB9PRUnpc47I+udLswiTszI8=
github.com/motemen/go-nuts v0.0.0-20220604134737-2658d0104f31 h1:lQ+0Zt2gm+w5+9iaBWKdJXC/gMrWjHhNbw9ts/9rSZ4=
github.com/motemen/go-nuts v0.0.0-20220604134737-2658d0104f31/go.mod h1:vkBO+XDNzovo+YLBpUod2SFvuWLObXlERnfj99RP3rU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.48.0 h1:yrBloImGQ7je4h8M10ujGh4R6oxYQJQKlMuETwNskGk=
github.com/prometheus/prometheus v0.48.0/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0=
github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE=
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No=
golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc=
google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v5 v5.6.0 h1:BMT6KIwBD9CaU91PJCZIe46bDmBWa9ynTQgJIOpfQBk=
gopkg.in/evanphx/json-patch.v5 v5.6.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk=
gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU=
k8s.io/apiextensions-apiserver v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM=
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
k8s.io/cli-runtime v0.28.4 h1:IW3aqSNFXiGDllJF4KVYM90YX4cXPGxuCxCVqCD8X+Q=
k8s.io/cli-runtime v0.28.4/go.mod h1:MLGRB7LWTIYyYR3d/DOgtUC8ihsAPA3P8K8FDNIqJ0k=
k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4=
k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo=
k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU=
k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao=
k8s.io/helm v2.17.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kube-openapi v0.0.0-20231129212854-f0671cc7e66a h1:ZeIPbyHHqahGIbeyLJJjAUhnxCKqXaDY+n89Ms8szyA=
k8s.io/kube-openapi v0.0.0-20231129212854-f0671cc7e66a/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kind v0.17.0 h1:CScmGz/wX66puA06Gj8OZb76Wmk7JIjgWf5JDvY7msM=
sigs.k8s.io/kind v0.17.0/go.mod h1:Qqp8AiwOlMZmJWs37Hgs31xcbiYXjtXlRBSftcnZXQk=
sigs.k8s.io/kustomize/api v0.15.0 h1:6Ca88kEOBVotHDw+y2IsIMYtg9Pvv7MKpW9JMyF/OH4=
sigs.k8s.io/kustomize/api v0.15.0/go.mod h1:p19kb+E14gN7zcIBR/nhByJDAfUa7N8mp6ZdH/mMXbg=
sigs.k8s.io/kustomize/kyaml v0.15.0 h1:ynlLMAxDhrY9otSg5GYE2TcIz31XkGZ2Pkj7SdolD84=
sigs.k8s.io/kustomize/kyaml v0.15.0/go.mod h1:+uMkBahdU1KNOj78Uta4rrXH+iH7wvg+nW7+GULvREA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
================================================
FILE: src/gomod.sh
================================================
#!/usr/bin/env bash
#
# Copyright 2021 The Cloud Robotics Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script updates the Go dependencies in go.mod and go.sum and applies
# changes to the WORKSPACE file.

# Abort on errors, on use of unset variables, and on failures inside pipelines.
set -euo pipefail

# Absolute path of the directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Force module mode; a no-op on Go >= 1.16 but kept for older toolchains.
export GO111MODULE=on

# Quote the path so directories containing spaces do not word-split.
cd "${DIR}"

go mod tidy -compat=1.17

# Regenerate Bazel BUILD files from the updated Go module graph.
bazel run //:gazelle

echo "updates done"
================================================
FILE: src/proto/http-relay/BUILD.bazel
================================================
load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# http relay api
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")

package(default_visibility = ["//visibility:public"])

# Canonical proto_library for http_over_rpc.proto.
proto_library(
    name = "http_relay_proto",
    srcs = ["http_over_rpc.proto"],
)

# Previously a second proto_library compiled the same sources under this name.
# Keep the label as an alias so existing dependents keep working while avoiding
# a duplicate compilation of http_over_rpc.proto.
alias(
    name = "http_over_rpc_proto",
    actual = ":http_relay_proto",
)

go_proto_library(
    name = "http_over_rpc_proto_go",
    importpath = "github.com/googlecloudrobotics/core/src/proto/http-relay",
    proto = ":http_relay_proto",
)

# Wraps the generated proto code; unused.go only exists to satisfy the Go
# toolchain (see the comment in that file).
go_library(
    name = "go_default_library",
    srcs = ["unused.go"],
    embed = [":http_over_rpc_proto_go"],
    importpath = "github.com/googlecloudrobotics/core/src/proto/http-relay",
)
================================================
FILE: src/proto/http-relay/http_over_rpc.proto
================================================
// Copyright 2019 The Cloud Robotics Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// These messages encapsulate an HTTP request. They're used by the Kubernetes
// relay to encapsulate an HTTP request as a payload over an RPC channel.
syntax = "proto2";
package cloudrobotics.http_relay.v1alpha1;
option go_package = "github.com/googlecloudrobotics/core/src/proto/http-relay;http_relay";
// A single HTTP header as a name/value pair. Headers that occur multiple
// times are represented as multiple HttpHeader entries with the same name.
message HttpHeader {
// Header name, e.g. "Content-Type".
optional string name = 1;
// Header value.
optional string value = 2;
}
// A single HTTP request encapsulated as an RPC payload (see the file header
// comment). The id correlates this request with its HttpResponse stream.
message HttpRequest {
// Unique identifier used to match HttpResponse messages to this request.
optional string id = 1;
// HTTP method, e.g. "GET" or "POST".
optional string method = 2;
// Host header value. NOTE(review): field number 6 suggests this was added
// after the original fields — confirm how it interacts with url in the relay.
optional string host = 6;
// Target URL of the request.
optional string url = 3;
// Request headers.
repeated HttpHeader header = 4;
// Raw request body bytes.
optional bytes body = 5;
}
// Each HttpRequest may generate a stream of multiple HTTP responses with the
// same id. The first response in the stream must contain status_code and
// header, and only the last response in the stream must have eof set to true.
// It's legal to send just one message with the entire response.
message HttpResponse {
// Matches the id of the HttpRequest this response belongs to.
optional string id = 4;
// HTTP status code; set on the first message of the stream.
optional int32 status_code = 1;
// Response headers; set on the first message of the stream.
repeated HttpHeader header = 2;
// A chunk of the response body.
optional bytes body = 3;
// True only on the last message of the stream.
optional bool eof = 5;
// HTTP trailers, if any.
repeated HttpHeader trailer = 6;
// Time the backend took to produce the response, in milliseconds.
optional int64 backend_duration_ms=7;
}
================================================
FILE: src/proto/http-relay/unused.go
================================================
// Package http_relay holds the protobuf-generated types for
// http_over_rpc.proto; the real sources are generated by the proto compiler
// during the build process. This dummy file exists only to make the Golang
// toolchain happy. We may need to replace it with a generated file to truly
// fix the build, though.
//
//go:generate protoc --go_out=. --go_opt=paths=source_relative http_over_rpc.proto
package http_relay
================================================
FILE: third_party/BUILD
================================================
================================================
FILE: third_party/BUILD.bazel
================================================
licenses(["notice"])

# Make the vendored *.BUILD files in this directory visible to the rest of
# the workspace. NOTE(review): presumably consumed as build_file labels by
# external repository rules — verify against the WORKSPACE/MODULE setup.
exports_files(
    glob(["*.BUILD"]),
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/README.md
================================================
# 3rd party deps
This directory contains bazel support for 3rd-party deps and vendored
dependencies.
## 3rd party helm charts
We store version pinned 3rd party helm charts here to achieve a hermetic build.
To update a chart, first set up Helm:
```shell
curl -s https://get.helm.sh/helm-v2.17.0-linux-amd64.tar.gz | tar xzf - -C ~/bin --strip-components=1 linux-amd64/helm
helm init --client-only
```
Next check available versions and fetch the specific version to store:
```shell
helm search prometheus-community/prometheus-operator --version='>7.0.0' --versions
helm fetch prometheus-community/prometheus-operator --version=7.5.0
```
================================================
FILE: third_party/akri/BUILD.bazel
================================================
# https://github.com/helm/charts/blob/master/LICENSE
# Apache license
licenses(["notice"])

# Helm chart for akri (version 0.12.9) included here was pulled from the
# project-akri github repo and modified to be compatible with the Intrinsic
# codebase. Future versions can be updated by running
#   sh update-akri.sh

# Export the vendored chart archive (*.tgz) and the extracted CRD yaml files
# so deployment rules elsewhere in the workspace can reference them.
exports_files(
    glob([
        "*.yaml",
        "*.tgz",
    ]),
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/akri/akri-configuration-crd.yaml
================================================
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: configurations.akri.sh
spec:
group: akri.sh
versions:
- name: v0
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
discoveryHandler:
type: object
properties:
name:
type: string
discoveryDetails:
type: string
discoveryProperties:
nullable: true
type: array
items:
type: object
required:
- name
properties:
name:
type: string
pattern: "^[_A-Za-z][_A-Za-z0-9]*$"
value:
type: string
nullable: true
valueFrom:
type: object
properties:
secretKeyRef:
type: object
required:
- name
properties:
key:
type: string
name:
type: string
namespace:
type: string
optional:
type: boolean
configMapKeyRef:
type: object
required:
- name
properties:
key:
type: string
name:
type: string
namespace:
type: string
optional:
type: boolean
oneOf:
- properties:
required: ["secretKeyRef"]
- properties:
required: ["configMapKeyRef"]
oneOf:
- properties:
required: ["value"]
- properties:
required: ["valueFrom"]
capacity:
type: integer
brokerSpec:
type: object
properties:
brokerJobSpec:
x-kubernetes-preserve-unknown-fields: true
type: object
nullable: true
brokerPodSpec:
x-kubernetes-preserve-unknown-fields: true
type: object
nullable: true
instanceServiceSpec:
x-kubernetes-preserve-unknown-fields: true
type: object
nullable: true
configurationServiceSpec:
x-kubernetes-preserve-unknown-fields: true
type: object
nullable: true
brokerProperties:
additionalProperties:
type: string
type: object
additionalPrinterColumns:
- name: Capacity
type: string
description: The capacity for each Instance discovered
jsonPath: .spec.capacity
- name: Age
type: date
jsonPath: .metadata.creationTimestamp
scope: Namespaced
names:
plural: configurations
singular: configuration
kind: Configuration
shortNames:
- akric
================================================
FILE: third_party/akri/akri-instance-crd.yaml
================================================
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: instances.akri.sh
spec:
group: akri.sh
versions:
- name: v0
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
configurationName:
type: string
brokerProperties:
additionalProperties:
type: string
type: object
shared:
type: boolean
nodes:
type: array
items:
type: string
deviceUsage: # map
additionalProperties:
type: string
type: object
additionalPrinterColumns:
- name: Config
type: string
description: The Configuration this Instance belongs to
jsonPath: .spec.configurationName
- name: Shared
type: boolean
description: Describes whether this Instance is shared
jsonPath: .spec.shared
- name: Nodes
type: string
description: Nodes that expose this Instance
jsonPath: .spec.nodes
- name: Age
type: date
jsonPath: .metadata.creationTimestamp
scope: Namespaced
names:
plural: instances
singular: instance
kind: Instance
shortNames:
- akrii
================================================
FILE: third_party/akri/update-akri.sh
================================================
#!/bin/bash
#
# Fetches the pinned version of the akri helm chart and refreshes the vendored
# CRD yaml files in this directory.

# Abort on errors, unset variables, and pipeline failures so a failed download
# cannot silently corrupt the vendored CRDs in the steps below.
set -euo pipefail

VERSION=0.12.9

# fetch the pinned version from the upstream chart repository
helm repo add akri-helm-charts https://project-akri.github.io/akri/
helm repo update akri-helm-charts
helm pull akri-helm-charts/akri --version="${VERSION}"

# Update crds: extract akri/crds/ from the chart archive into this directory.
tar xzf "akri-${VERSION}.tgz" --strip-components=2 akri/crds/

# Strip template comments.
# NOTE(review): only the configuration CRD is cleaned, not the instance CRD —
# confirm whether that is intentional.
sed -i 's/#.*$//' akri-configuration-crd.yaml
================================================
FILE: third_party/app_crd.BUILD
================================================
# Kubernetes Applications CRD
#
# Vendored BUILD file for the upstream app.k8s.io Applications repository.
# NOTE(review): presumably injected via an external repository rule's
# build_file attribute — verify against the WORKSPACE/MODULE setup.
package(
    default_visibility = ["//visibility:public"],
)

licenses(["permissive"])  # Apache 2.0

# The CustomResourceDefinition yaml for app.k8s.io Applications.
filegroup(
    name = "app_crd",
    srcs = [
        "config/crd/bases/app.k8s.io_applications.yaml",
    ],
)
================================================
FILE: third_party/cert-manager/BUILD.bazel
================================================
# https://github.com/jetstack/cert-manager/blob/master/LICENSE
# Apache 2.0 license
licenses(["notice"])

# files downloaded by running:
#   helm repo add jetstack https://charts.jetstack.io --force-update
#   cert_manager_version="v1.16.3"
#   helm pull jetstack/cert-manager --version ${cert_manager_version} -d third_party/cert-manager/

# Export the vendored chart archive so deployment rules can reference it.
exports_files(
    glob([
        "*.tgz",
    ]),
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/cert-manager-google-cas-issuer/BUILD.bazel
================================================
# https://github.com/jetstack/google-cas-issuer/blob/main/LICENSE.txt
# Apache 2.0 license
licenses(["notice"])

# files downloaded by running:
#   cas_issuer_version="v0.6.2"
#   curl -o third_party/cert-manager-google-cas-issuer/cert-manager-google-cas-issuer-${cas_issuer_version}.tgz \
#     https://charts.jetstack.io/charts/cert-manager-google-cas-issuer-${cas_issuer_version}.tgz
# alternatively, files can be downloaded using helm
#   cas_issuer_version="v0.6.2"
#   helm repo add cert-manager https://charts.jetstack.io
#   helm pull cert-manager/cert-manager-google-cas-issuer --version ${cas_issuer_version}

# Export the vendored chart archive so deployment rules can reference it.
exports_files(
    glob([
        "*.tgz",
    ]),
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/fluentd_gcp_addon/BUILD.bazel
================================================
# Description:
#   Stackdriver Logging Agent is a DaemonSet which spawns a pod on each
#   node that reads logs, generated by kubelet, container runtime and
#   containers and sends them to the Stackdriver. When logs are exported
#   to the Stackdriver, they can be searched, viewed, and analyzed.
#
# See:
#   https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/fluentd-gcp
#   Last update: df908c3aad70be495b358ab6d8e62ec4b1ca0726
#
# In order to update them run:
#
#   THIS_DIR=$PWD
#   cd
#   git clone https://github.com/kubernetes/kubernetes.git
#   cd kubernetes
#   (source "cluster/gce/gci/configure-helper.sh" && setup-fluentd cluster/addons)
#   for f in cluster/addons/fluentd-gcp/fluentd-gcp-{configmap,ds}.yaml; do \
#     grep -v "namespace: kube-system" $f >${THIS_DIR}/third_party/fluentd_gcp_addon/$(basename $f); \
#   done
#
# and apply changes as noted in each file.
#
# NOTE: The configmap has been amended to support multi line log entries
# (search for "BEGIN modifications" in fluentd-gcp-configmap.yaml).
licenses(["permissive"])  # Apache 2.0

# The two vendored manifests (fluentd ConfigMaps and DaemonSet) bundled for
# consumption by deployment rules elsewhere in the workspace.
filegroup(
    name = "fluentd_gcp_addon",
    srcs = [
        "fluentd-gcp-configmap.yaml",
        "fluentd-gcp-ds.yaml",
    ],
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/fluentd_gcp_addon/fluentd-gcp-configmap.yaml
================================================
# Copied from https://github.com/kubernetes/kubernetes/
# - removed:
# - addonmanager.kubernetes.io/mode label
#
# License: Apache 2.0
# https://github.com/kubernetes/kubernetes/blob/master/LICENSE
# This ConfigMap is used to ingest logs against new resources like
# "k8s_container" and "k8s_node" when $LOGGING_STACKDRIVER_RESOURCE_TYPES is set
# to "new".
# When $LOGGING_STACKDRIVER_RESOURCE_TYPES is set to "old", the ConfigMap in
# fluentd-gcp-configmap-old.yaml will be used for ingesting logs against old
# resources like "gke_container" and "gce_instance".
kind: ConfigMap
apiVersion: v1
data:
containers.input.conf: |-
# This configuration file for Fluentd is used
# to watch changes to Docker log files that live in the
# directory /var/lib/docker/containers/ and are symbolically
# linked to from the /var/log/containers directory using names that capture the
# pod name and container name. These logs are then submitted to
# Google Cloud Logging which assumes the installation of the cloud-logging plug-in.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The original tag is derived from the log file's location.
# For example a Docker container's logs might be in the directory:
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
# and in the file:
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host
# machine in the /var/log/containers directory which includes the pod name,
# the namespace name and the Kubernetes container name:
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# This results in the tag:
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# where 'synthetic-logger-0.25lps-pod' is the pod name, 'default' is the
# namespace name, 'synth-lgr' is the container name and '997599971ee6..' is
# the container ID.
# The record reformer is used to extract pod_name, namespace_name and
# container_name from the tag and set them in a local_resource_id in the
# format of:
# 'k8s_container...'.
# The reformer also changes the tags to 'stderr' or 'stdout' based on the
# value of 'stream'.
# local_resource_id is later used by google_cloud plugin to determine the
# monitored resource to ingest logs against.
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
@type tail
path /var/log/containers/*.log
pos_file /var/log/gcp-containers.log.pos
# Tags at this point are in the format of:
# reform.var.log.containers.__-.log
tag reform.*
read_from_head true
@type multi_format
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
format /^(?.+) (?stdout|stderr) [^ ]* (?.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
# Concatenate multi line log entries.
# We search for the expected prefix using the same regex as the parser below
# and group all consecutive lines until the prefix is encountered again.
@type concat
key log
multiline_start_regexp /^\w\d{4} [^\s]*\s+\d+\s+[^ \]]+\]/
flush_interval 1
timeout_label @OUTPUT
separator ""
system.input.conf: |-
# Example:
# <86>1 2021-03-16T08:32:50.502885+01:00 my-hostname sudo - - - pam_unix(sudo:session): session opened for user root(uid=0) by admin(uid=1000)
@type tail
format syslog
path /var/log/syslog.fluentd
pos_file /var/log/syslog.fluentd.pos
message_format rfc5424
with_priority true
tag syslog
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
@type tail
format /^time="(?[^)]*)" level=(?[^ ]*) msg="(?[^"]*)"( err="(?[^"]*)")?( statusCode=($\d+))?/
path /var/log/docker.log
pos_file /var/log/gcp-docker.log.pos
tag docker
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/gcp-etcd.log.pos
tag etcd
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/gcp-kubelet.log.pos
tag kubelet
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/gcp-kube-proxy.log.pos
tag kube-proxy
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/gcp-kube-apiserver.log.pos
tag kube-apiserver
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/gcp-kube-controller-manager.log.pos
tag kube-controller-manager
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/gcp-kube-scheduler.log.pos
tag kube-scheduler
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/gcp-glbc.log.pos
tag glbc
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/gcp-cluster-autoscaler.log.pos
tag cluster-autoscaler
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Keep this for compatibility, remove this after
# cri container runtime rolls out.
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
read_from_head true
tag docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
pos_file /var/log/gcp-journald-container-runtime.pos
read_from_head true
tag container-runtime
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
read_from_head true
tag kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
pos_file /var/log/gcp-journald-node-problem-detector.pos
read_from_head true
tag node-problem-detector
# BEGIN_NODE_JOURNAL
# Whether to include node-journal or not is determined when starting the
# cluster. It is not changed when the cluster is already running.
@type systemd
pos_file /var/log/gcp-journald.pos
read_from_head true
tag node-journal
@type grep
key _SYSTEMD_UNIT
pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$
# END_NODE_JOURNAL
monitoring.conf: |-
# This source is used to acquire approximate process start timestamp,
# which purpose is explained before the corresponding output plugin.
@type exec
command /bin/sh -c 'date +%s'
tag process_start
time_format %Y-%m-%d %H:%M:%S
keys process_start_timestamp
# This filter is used to convert process start timestamp to integer
# value for correct ingestion in the prometheus output plugin.
@type record_transformer
enable_ruby true
auto_typecast true
process_start_timestamp ${record["process_start_timestamp"].to_i}
metadata:
name: fluentd-gcp-config-v1.2.5
---
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-gcp-main-config
data:
google-fluentd.conf: |-
@include config.d/*.conf
@type prometheus
port 24231
@type prometheus_monitor
# This match is placed before the all-matching output to provide metric
# exporter with a process start timestamp for correct exporting of
# cumulative metrics to Stackdriver.
@type prometheus
type gauge
name process_start_time_seconds
desc Timestamp of the process start in seconds
key process_start_timestamp
# Unify output of regular and "timed out" entries from the concat plugin.
# See https://github.com/fluent-plugins-nursery/fluent-plugin-concat/commit/5802235e71f6b31d741aa1205e710c08dee415cf.
# Ideally we would remove the @OUTPUT label from the timed out entries
# to avoid nesting the entire output logic under "" but
# fluentd does not seem to provide a way to do that.
@type relabel
@label @OUTPUT
@type parser
format /^(?\w)(?\d{4} [^\s]*)\s+(?\d+)\s+(?[^ \]]+)\] (?(.|\n)*.)/
reserve_data true
suppress_parse_error_log true
emit_invalid_record_to_error false
key_name log
@type record_reformer
enable_ruby true
# Extract local_resource_id from tag for 'k8s_container' monitored
# resource. The format is:
# 'k8s_container...'.
"logging.googleapis.com/local_resource_id" ${"k8s_container.#{tag_suffix[4].rpartition('.')[0].split('_')[1]}.#{tag_suffix[4].rpartition('.')[0].split('_')[0]}.#{tag_suffix[4].rpartition('.')[0].split('_')[2].rpartition('-')[0]}"}
# Rename the field 'log' to a more generic field 'message'. This way the
# fluent-plugin-google-cloud knows to flatten the field as textPayload
# instead of jsonPayload after extracting 'time', 'severity' and
# 'stream' from the record.
message ${record['log']}
# If 'severity' is not set, assume stderr is ERROR and stdout is INFO.
severity ${record['severity'] || if record['stream'] == 'stderr' then 'ERROR' else 'INFO' end}
tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
remove_keys stream,log
# Detect exceptions in the log output and forward them as one log entry.
@type detect_exceptions
remove_tag_prefix raw
message message
stream "logging.googleapis.com/local_resource_id"
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
# This filter allows to count the number of log entries read by fluentd
# before they are processed by the output plugin. This in turn allows to
# monitor the number of log entries that were read but never sent, e.g.
# because of liveness probe removing buffer.
@type prometheus
type counter
name logging_entry_count
desc Total number of log entries generated by either application containers or system components
# This section is exclusive for k8s_container logs. Those come with
# 'stderr'/'stdout' tags.
# TODO(instrumentation): Reconsider this workaround later.
# Trim the entries which exceed slightly less than 100KB, to avoid
# dropping them. It is a necessity, because Stackdriver only supports
# entries that are up to 100KB in size.
@type record_transformer
enable_ruby true
message ${record['message'].length > 100000 ? "[Trimmed]#{record['message'][0..100000]}..." : record['message']}
# Do not collect fluentd's own logs to avoid infinite loops.
@type null
# Add a unique insertId to each log entry that doesn't already have it.
# This helps guarantee the order and prevent log duplication.
@type add_insert_ids
# This section is exclusive for k8s_container logs. These logs come with
# 'stderr'/'stdout' tags.
# We use a separate output stanza for 'k8s_node' logs with a smaller buffer
# because node logs are less important than user's container logs.
@type google_cloud
# Try to detect JSON formatted log entries.
detect_json true
# Collect metrics in Prometheus registry about plugin activity.
enable_monitoring true
monitoring_type prometheus
# Allow log entries from multiple containers to be sent in the same request.
split_logs_by_tag false
# Set the buffer type to file to improve the reliability and reduce the memory consumption
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
# Set queue_full action to block because we want to pause gracefully
# in case of the off-the-limits load instead of throwing an exception
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the recommended
# chunk size of 5MB per write request.
buffer_chunk_limit 512k
# Length limit of chunk queue. Together with `buffer_chunk_limit` affects disk space used.
buffer_queue_limit 64
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
# Never wait longer than 30 seconds between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 8
use_grpc true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
# Attach local_resource_id for 'k8s_node' monitored resource.
@type record_transformer
enable_ruby true
"logging.googleapis.com/local_resource_id" ${"k8s_node.#{ENV['NODE_NAME']}"}
# This section is exclusive for 'k8s_node' logs. These logs come with tags
# that are neither 'stderr' nor 'stdout'.
# We use a separate output stanza for 'k8s_container' logs with a larger
# buffer because user's container logs are more important than node logs.
@type google_cloud
detect_json true
enable_monitoring true
monitoring_type prometheus
# Allow entries from multiple system logs to be sent in the same request.
split_logs_by_tag false
detect_subservice false
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 512k
buffer_queue_limit 64
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 8
use_grpc true
# Skip timestamp adjustment as this is in a controlled environment with
# known timestamp format. This helps with CPU usage.
adjust_invalid_timestamps false
================================================
FILE: third_party/fluentd_gcp_addon/fluentd-gcp-ds.yaml
================================================
# Copied from https://github.com/kubernetes/kubernetes/
# - removed:
# - addonmanager.kubernetes.io/mode, kubernetes.io/cluster-service label
# - serviceAccountName
# - liveness probe
# - scheduling annotations
# - priorityClassName
# - nodeSelectors
# - tolerations
# - terminationGracePeriodSeconds
# - NODE_NAME, STACKDRIVER_METADATA_AGENT_URL env
# - hostNetwork: true (otherwise it won't find the metadata server)
# - modified the path to local libs (/usr/lib) because it's different on MIR
# - set the fluentd-gcp-config volume mode to 420
# - added a taint toleration so it runs on all nodes
#
# For release notes check:
# https://github.com/GoogleCloudPlatform/google-fluentd/releases
#
# License: Apache 2.0
# https://github.com/kubernetes/kubernetes/blob/master/LICENSE
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-gcp-v3.2.0
labels:
k8s-app: fluentd-gcp
version: v3.2.0
spec:
selector:
matchLabels:
k8s-app: fluentd-gcp
version: v3.2.0
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: fluentd-gcp
version: v3.2.0
spec:
dnsPolicy: Default
containers:
- name: fluentd-gcp
image: gcr.io/stackdriver-agents/stackdriver-logging-agent:1.9.5
args:
- -q
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- mountPath: /host/lib
name: libsystemddir
readOnly: true
- name: config-volume
mountPath: /etc/google-fluentd/config.d
- name: default-config-volume
mountPath: /etc/google-fluentd/google-fluentd.conf
subPath: google-fluentd.conf
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: libsystemddir
hostPath:
path: /usr/lib
- name: config-volume
configMap:
name: fluentd-gcp-config-v1.2.5
defaultMode: 420
- name: default-config-volume
configMap:
name: fluentd-gcp-main-config
defaultMode: 420
tolerations:
- operator: "Exists"
effect: "NoSchedule"
================================================
FILE: third_party/helm2/BUILD.bazel
================================================
# Expose the vendored Helm v2 binary so other packages (e.g. the helm_chart
# and helm_template build rules under //bazel) can depend on it directly.
exports_files(["helm"])
================================================
FILE: third_party/helm3/BUILD.bazel
================================================
# Expose the vendored Helm v3 binary so other packages (e.g. the helm_chart
# and helm_template build rules under //bazel) can depend on it directly.
exports_files(["helm"])
================================================
FILE: third_party/ingress-nginx.BUILD
================================================
# Description:
#   Grafana dashboard definitions shipped with the ingress-nginx project,
#   collected as a filegroup so monitoring charts can bundle them.
licenses(["notice"])  # Apache-2.0
filegroup(
    name = "ingress-nginx-dashboards",
    # All dashboard JSON files from the upstream deploy/grafana directory.
    srcs = glob(["deploy/grafana/dashboards/*.json"]),
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/kube-prometheus-stack/00-crds.yaml
================================================
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: probes.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: Probe
listKind: ProbeList
plural: probes
shortNames:
- prb
singular: probe
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: Probe defines monitoring for a set of static targets or ingresses.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Ingress selection for target discovery
by Prometheus.
properties:
authorization:
description: Authorization section for this endpoint
properties:
credentials:
description: Selects a key of a Secret in the namespace that contains
the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value is case-insensitive.
\n \"Basic\" is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: 'BasicAuth allow an endpoint to authenticate over basic
authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint'
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: Secret to mount to read bearer token for scraping targets.
The secret needs to be in the same namespace as the probe and accessible
by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
interval:
description: Interval at which targets are probed using the configured
prober. If not specified Prometheus' global scrape interval is used.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
jobName:
description: The job name assigned to scraped metrics by default.
type: string
keepDroppedTargets:
description: "Per-scrape limit on the number of targets dropped by
relabeling that will be kept in memory. 0 means no limit. \n It
requires Prometheus >= v2.47.0."
format: int64
type: integer
labelLimit:
description: Per-scrape limit on number of labels that will be accepted
for a sample. Only valid in Prometheus versions 2.27.0 and newer.
format: int64
type: integer
labelNameLengthLimit:
description: Per-scrape limit on length of labels name that will be
accepted for a sample. Only valid in Prometheus versions 2.27.0
and newer.
format: int64
type: integer
labelValueLengthLimit:
description: Per-scrape limit on length of labels value that will
be accepted for a sample. Only valid in Prometheus versions 2.27.0
and newer.
format: int64
type: integer
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: "RelabelConfig allows dynamic rewriting of the label
set for targets, alerts, scraped samples and remote write samples.
\n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus
>= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source label
values. \n Only applicable when the action is `HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace action
is performed if the regular expression matches. \n Regex capture
groups are available."
type: string
separator:
description: Separator is the string between concatenated SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing labels.
Their content is concatenated using the configured Separator
and matched against the configured regular expression.
items:
description: LabelName is a valid Prometheus label name which
may only contain ASCII letters, numbers, as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`, `HashMod`,
`Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions.
\n Regex capture groups are available."
type: string
type: object
type: array
module:
description: 'The module to use for probing specifying how to probe
the target. Example module configuring in the blackbox exporter:
https://github.com/prometheus/blackbox_exporter/blob/master/example.yml'
type: string
oauth2:
description: OAuth2 for the URL. Only valid in Prometheus versions
2.27.0 and newer.
properties:
clientId:
description: '`clientId` specifies a key of a Secret or ConfigMap
containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret containing
the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for the
token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the token
from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
prober:
description: Specification for the prober to use for probing targets.
The prober.URL parameter is required. Targets cannot be probed if
left empty.
properties:
path:
default: /probe
description: Path to collect metrics from. Defaults to `/probe`.
type: string
proxyUrl:
description: Optional ProxyURL.
type: string
scheme:
description: HTTP scheme to use for scraping. `http` and `https`
are the expected values unless you rewrite the `__scheme__`
label via relabeling. If empty, Prometheus uses the default
value `http`.
enum:
- http
- https
type: string
url:
description: Mandatory URL of the prober.
type: string
required:
- url
type: object
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted.
format: int64
type: integer
scrapeTimeout:
description: Timeout for scraping metrics from the Prometheus exporter.
If not specified, the Prometheus global scrape timeout is used.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
targetLimit:
description: TargetLimit defines a limit on the number of scraped
targets that will be accepted.
format: int64
type: integer
targets:
description: Targets defines a set of static or dynamically discovered
targets to probe.
properties:
ingress:
description: ingress defines the Ingress objects to probe and
the relabeling configuration. If `staticConfig` is also defined,
`staticConfig` takes precedence.
properties:
namespaceSelector:
description: From which namespaces to select Ingress objects.
properties:
any:
description: Boolean describing whether all namespaces
are selected in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names to select from.
items:
type: string
type: array
type: object
relabelingConfigs:
description: 'RelabelConfigs to apply to the label set of
the target before it gets scraped. The original ingress
address is available via the `__tmp_prometheus_ingress_address`
label. It can be used to customize the probed URL. The original
scrape job''s name is available via the `__tmp_prometheus_job_name`
label. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: "RelabelConfig allows dynamic rewriting of
the label set for targets, alerts, scraped samples and
remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label
name which may only contain ASCII letters, numbers,
as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is
written in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
selector:
description: Selector to select the Ingress objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty.
This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: object
staticConfig:
description: 'staticConfig defines the static list of targets
to probe and the relabeling configuration. If `ingress` is also
defined, `staticConfig` takes precedence. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.'
properties:
labels:
additionalProperties:
type: string
description: Labels assigned to all metrics scraped from the
targets.
type: object
relabelingConfigs:
description: 'RelabelConfigs to apply to the label set of
the targets before it gets scraped. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: "RelabelConfig allows dynamic rewriting of
the label set for targets, alerts, scraped samples and
remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label
name which may only contain ASCII letters, numbers,
as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is
written in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
static:
description: The list of hosts to probe.
items:
type: string
type: array
type: object
type: object
tlsConfig:
description: TLS configuration to use when scraping the endpoint.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the targets.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
type: object
required:
- spec
type: object
served: true
storage: true
================================================
FILE: third_party/kube-prometheus-stack/01-crds.yaml
================================================
{{ if eq .Values.app_management "true" }}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: alertmanagerconfigs.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: AlertmanagerConfig
listKind: AlertmanagerConfigList
plural: alertmanagerconfigs
shortNames:
- amcfg
singular: alertmanagerconfig
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: AlertmanagerConfig configures the Prometheus Alertmanager, specifying
how alerts should be grouped, inhibited and notified to external systems.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AlertmanagerConfigSpec is a specification of the desired
behavior of the Alertmanager configuration. By definition, the Alertmanager
configuration only applies to alerts for which the `namespace` label
is equal to the namespace of the AlertmanagerConfig resource.
properties:
inhibitRules:
description: List of inhibition rules. The rules will only apply to
alerts matching the resource's namespace.
items:
description: InhibitRule defines an inhibition rule that allows
to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule
properties:
equal:
description: Labels that must have an equal value in the source
and target alert for the inhibition to take effect.
items:
type: string
type: array
sourceMatch:
description: Matchers for which one or more alerts have to exist
for the inhibition to take effect. The operator enforces that
the alert matches the resource's namespace.
items:
description: Matcher defines how to match on alert's labels.
properties:
matchType:
description: Match operation available with AlertManager
>= v0.22.0 and takes precedence over Regex (deprecated)
if non-empty.
enum:
- '!='
- =
- =~
- '!~'
type: string
name:
description: Label to match.
minLength: 1
type: string
regex:
description: 'Whether to match on equality (false) or
regular-expression (true). Deprecated: for AlertManager
>= v0.22.0, `matchType` should be used instead.'
type: boolean
value:
description: Label value to match.
type: string
required:
- name
type: object
type: array
targetMatch:
description: Matchers that have to be fulfilled in the alerts
to be muted. The operator enforces that the alert matches
the resource's namespace.
items:
description: Matcher defines how to match on alert's labels.
properties:
matchType:
description: Match operation available with AlertManager
>= v0.22.0 and takes precedence over Regex (deprecated)
if non-empty.
enum:
- '!='
- =
- =~
- '!~'
type: string
name:
description: Label to match.
minLength: 1
type: string
regex:
description: 'Whether to match on equality (false) or
regular-expression (true). Deprecated: for AlertManager
>= v0.22.0, `matchType` should be used instead.'
type: boolean
value:
description: Label value to match.
type: string
required:
- name
type: object
type: array
type: object
type: array
muteTimeIntervals:
description: List of MuteTimeInterval specifying when the routes should
be muted.
items:
description: MuteTimeInterval specifies the periods in time when
notifications will be muted
properties:
name:
description: Name of the time interval
type: string
timeIntervals:
description: TimeIntervals is a list of TimeInterval
items:
description: TimeInterval describes intervals of time
properties:
daysOfMonth:
description: DaysOfMonth is a list of DayOfMonthRange
items:
description: DayOfMonthRange is an inclusive range of
days of the month beginning at 1
properties:
end:
description: End of the inclusive range
maximum: 31
minimum: -31
type: integer
start:
description: Start of the inclusive range
maximum: 31
minimum: -31
type: integer
type: object
type: array
months:
description: Months is a list of MonthRange
items:
description: MonthRange is an inclusive range of months
of the year beginning in January Months can be specified
by name (e.g 'January') by numerical month (e.g '1')
or as an inclusive range (e.g 'January:March', '1:3',
'1:March')
pattern: ^((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12]))$)|$)
type: string
type: array
times:
description: Times is a list of TimeRange
items:
description: TimeRange defines a start and end time
in 24hr format
properties:
endTime:
description: EndTime is the end time in 24hr format.
pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)
type: string
startTime:
description: StartTime is the start time in 24hr
format.
pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)
type: string
type: object
type: array
weekdays:
description: Weekdays is a list of WeekdayRange
items:
description: WeekdayRange is an inclusive range of days
of the week beginning on Sunday Days can be specified
by name (e.g 'Sunday') or as an inclusive range (e.g
'Monday:Friday')
pattern: ^((?i)sun|mon|tues|wednes|thurs|fri|satur)day(?:((:(sun|mon|tues|wednes|thurs|fri|satur)day)$)|$)
type: string
type: array
years:
description: Years is a list of YearRange
items:
description: YearRange is an inclusive range of years
pattern: ^2\d{3}(?::2\d{3}|$)
type: string
type: array
type: object
type: array
type: object
type: array
receivers:
description: List of receivers.
items:
description: Receiver defines one or more notification integrations.
properties:
discordConfigs:
description: List of Discord configurations.
items:
description: DiscordConfig configures notifications via Discord.
See https://prometheus.io/docs/alerting/latest/configuration/#discord_config
properties:
apiURL:
description: The secret's key that contains the Discord
webhook URL. The secret needs to be in the same namespace
as the AlertmanagerConfig object and accessible by the
Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: The template of the message's body.
type: string
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
title:
description: The template of the message's title.
type: string
required:
- apiURL
type: object
type: array
emailConfigs:
description: List of Email configurations.
items:
description: EmailConfig configures notifications via Email.
properties:
authIdentity:
description: The identity to use for authentication.
type: string
authPassword:
description: The secret's key that contains the password
to use for authentication. The secret needs to be in
the same namespace as the AlertmanagerConfig object
and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
authSecret:
description: The secret's key that contains the CRAM-MD5
secret. The secret needs to be in the same namespace
as the AlertmanagerConfig object and accessible by the
Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
authUsername:
description: The username to use for authentication.
type: string
from:
description: The sender address.
type: string
headers:
description: Further headers email header key/value pairs.
Overrides any headers previously set by the notification
implementation.
items:
description: KeyValue defines a (key, value) tuple.
properties:
key:
description: Key of the tuple.
minLength: 1
type: string
value:
description: Value of the tuple.
type: string
required:
- key
- value
type: object
type: array
hello:
description: The hostname to identify to the SMTP server.
type: string
html:
description: The HTML body of the email notification.
type: string
requireTLS:
description: The SMTP TLS requirement. Note that Go does
not support unencrypted connections to remote SMTP endpoints.
type: boolean
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
smarthost:
description: The SMTP host and port through which emails
are sent. E.g. example.com:25
type: string
text:
description: The text body of the email notification.
type: string
tlsConfig:
description: TLS configuration
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to use
for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for
the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing
client-authentication.
properties:
configMap:
description: ConfigMap containing data to use
for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for
the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file
for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
to:
description: The email address to send notifications to.
type: string
type: object
type: array
msteamsConfigs:
description: List of MSTeams configurations. It requires Alertmanager
>= 0.26.0.
items:
description: MSTeamsConfig configures notifications via Microsoft
Teams. It requires Alertmanager >= 0.26.0.
properties:
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
sendResolved:
description: Whether to notify about resolved alerts.
type: boolean
text:
description: Message body template.
type: string
title:
description: Message title template.
type: string
webhookUrl:
description: MSTeams webhook URL.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
required:
- webhookUrl
type: object
type: array
name:
description: Name of the receiver. Must be unique across all
items from the list.
minLength: 1
type: string
opsgenieConfigs:
description: List of OpsGenie configurations.
items:
description: OpsGenieConfig configures notifications via OpsGenie.
See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config
properties:
actions:
description: Comma separated list of actions that will
be available for the alert.
type: string
apiKey:
description: The secret's key that contains the OpsGenie
API key. The secret needs to be in the same namespace
as the AlertmanagerConfig object and accessible by the
Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
apiURL:
description: The URL to send OpsGenie API requests to.
type: string
description:
description: Description of the incident.
type: string
details:
description: A set of arbitrary key/value pairs that provide
further detail about the incident.
items:
description: KeyValue defines a (key, value) tuple.
properties:
key:
description: Key of the tuple.
minLength: 1
type: string
value:
description: Value of the tuple.
type: string
required:
- key
- value
type: object
type: array
entity:
description: Optional field that can be used to specify
which domain alert is related to.
type: string
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: Alert text limited to 130 characters.
type: string
note:
description: Additional alert note.
type: string
priority:
description: Priority level of alert. Possible values
are P1, P2, P3, P4, and P5.
type: string
responders:
description: List of responders responsible for notifications.
items:
description: OpsGenieConfigResponder defines a responder
to an incident. One of `id`, `name` or `username`
has to be defined.
properties:
id:
description: ID of the responder.
type: string
name:
description: Name of the responder.
type: string
type:
description: Type of responder.
enum:
- team
- teams
- user
- escalation
- schedule
minLength: 1
type: string
username:
description: Username of the responder.
type: string
required:
- type
type: object
type: array
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
source:
description: Backlink to the sender of the notification.
type: string
tags:
description: Comma separated list of tags attached to
the notifications.
type: string
updateAlerts:
description: Whether to update message and description
of the alert in OpsGenie if it already exists By default,
the alert is never updated in OpsGenie, the new message
only appears in activity log.
type: boolean
type: object
type: array
pagerdutyConfigs:
description: List of PagerDuty configurations.
items:
description: PagerDutyConfig configures notifications via
PagerDuty. See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config
properties:
class:
description: The class/type of the event.
type: string
client:
description: Client identification.
type: string
clientURL:
description: Backlink to the sender of notification.
type: string
component:
description: The part or component of the affected system
that is broken.
type: string
description:
description: Description of the incident.
type: string
details:
description: Arbitrary key/value pairs that provide further
detail about the incident.
items:
description: KeyValue defines a (key, value) tuple.
properties:
key:
description: Key of the tuple.
minLength: 1
type: string
value:
description: Value of the tuple.
type: string
required:
- key
- value
type: object
type: array
group:
description: A cluster or grouping of sources.
type: string
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
pagerDutyImageConfigs:
description: A list of image details to attach that provide
further detail about an incident.
items:
description: PagerDutyImageConfig attaches images to
an incident
properties:
alt:
description: Alt is the optional alternative text
for the image.
type: string
href:
description: Optional URL; makes the image a clickable
link.
type: string
src:
description: Src of the image being attached to
the incident
type: string
type: object
type: array
pagerDutyLinkConfigs:
description: A list of link details to attach that provide
further detail about an incident.
items:
description: PagerDutyLinkConfig attaches text links
to an incident
properties:
alt:
description: Text that describes the purpose of
the link, and can be used as the link's text.
type: string
href:
description: Href is the URL of the link to be attached
type: string
type: object
type: array
routingKey:
description: The secret's key that contains the PagerDuty
integration key (when using Events API v2). Either this
field or `serviceKey` needs to be defined. The secret
needs to be in the same namespace as the AlertmanagerConfig
object and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
serviceKey:
description: The secret's key that contains the PagerDuty
service key (when using integration type "Prometheus").
Either this field or `routingKey` needs to be defined.
The secret needs to be in the same namespace as the
AlertmanagerConfig object and accessible by the Prometheus
Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
severity:
description: Severity of the incident.
type: string
url:
description: The URL to send requests to.
type: string
type: object
type: array
pushoverConfigs:
description: List of Pushover configurations.
items:
description: PushoverConfig configures notifications via Pushover.
See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config
properties:
device:
description: The name of a device to send the notification
to
type: string
expire:
description: How long your notification will continue
to be retried for, unless the user acknowledges the
notification.
pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$
type: string
html:
description: Whether notification message is HTML or plain
text.
type: boolean
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: Notification message.
type: string
priority:
description: Priority, see https://pushover.net/api#priority
type: string
retry:
description: How often the Pushover servers will send
the same notification to the user. Must be at least
30 seconds.
pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$
type: string
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
sound:
description: The name of one of the sounds supported by
device clients to override the user's default sound
choice
type: string
title:
description: Notification title.
type: string
token:
description: The secret's key that contains the registered
application's API token, see https://pushover.net/apps.
The secret needs to be in the same namespace as the
AlertmanagerConfig object and accessible by the Prometheus
Operator. Either `token` or `tokenFile` is required.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
tokenFile:
description: The token file that contains the registered
application's API token, see https://pushover.net/apps.
Either `token` or `tokenFile` is required. It requires
Alertmanager >= v0.26.0.
type: string
url:
description: A supplementary URL shown alongside the message.
type: string
urlTitle:
description: A title for supplementary URL, otherwise
just the URL is shown
type: string
userKey:
description: The secret's key that contains the recipient
user's user key. The secret needs to be in the same
namespace as the AlertmanagerConfig object and accessible
by the Prometheus Operator. Either `userKey` or `userKeyFile`
is required.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
userKeyFile:
description: The user key file that contains the recipient
user's user key. Either `userKey` or `userKeyFile` is
required. It requires Alertmanager >= v0.26.0.
type: string
type: object
type: array
slackConfigs:
description: List of Slack configurations.
items:
description: SlackConfig configures notifications via Slack.
See https://prometheus.io/docs/alerting/latest/configuration/#slack_config
properties:
actions:
description: A list of Slack actions that are sent with
each notification.
items:
description: SlackAction configures a single Slack action
that is sent with each notification. See https://api.slack.com/docs/message-attachments#action_fields
and https://api.slack.com/docs/message-buttons for
more information.
properties:
confirm:
description: SlackConfirmationField protect users
from destructive actions or particularly distinguished
decisions by asking them to confirm their button
click one more time. See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields
for more information.
properties:
dismissText:
type: string
okText:
type: string
text:
minLength: 1
type: string
title:
type: string
required:
- text
type: object
name:
type: string
style:
type: string
text:
minLength: 1
type: string
type:
minLength: 1
type: string
url:
type: string
value:
type: string
required:
- text
- type
type: object
type: array
apiURL:
description: The secret's key that contains the Slack
webhook URL. The secret needs to be in the same namespace
as the AlertmanagerConfig object and accessible by the
Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
callbackId:
type: string
channel:
description: The channel or user to send notifications
to.
type: string
color:
type: string
fallback:
type: string
fields:
description: A list of Slack fields that are sent with
each notification.
items:
description: SlackField configures a single Slack field
that is sent with each notification. Each field must
contain a title, value, and optionally, a boolean
value to indicate if the field is short enough to
be displayed next to other fields designated as short.
See https://api.slack.com/docs/message-attachments#fields
for more information.
properties:
short:
type: boolean
title:
minLength: 1
type: string
value:
minLength: 1
type: string
required:
- title
- value
type: object
type: array
footer:
type: string
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
iconEmoji:
type: string
iconURL:
type: string
imageURL:
type: string
linkNames:
type: boolean
mrkdwnIn:
items:
type: string
type: array
pretext:
type: string
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
shortFields:
type: boolean
text:
type: string
thumbURL:
type: string
title:
type: string
titleLink:
type: string
username:
type: string
type: object
type: array
snsConfigs:
description: List of SNS configurations
items:
description: SNSConfig configures notifications via AWS SNS.
See https://prometheus.io/docs/alerting/latest/configuration/#sns_configs
properties:
apiURL:
description: The SNS API URL i.e. https://sns.us-east-2.amazonaws.com.
If not specified, the SNS API URL from the SNS SDK will
be used.
type: string
attributes:
additionalProperties:
type: string
description: SNS message attributes.
type: object
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: The message content of the SNS notification.
type: string
phoneNumber:
description: Phone number if message is delivered via
SMS in E.164 format. If you don't specify this value,
you must specify a value for the TopicARN or TargetARN.
type: string
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
sigv4:
description: Configures AWS's Signature Verification 4
signing process to sign requests.
properties:
accessKey:
description: AccessKey is the AWS API key. If not
specified, the environment variable `AWS_ACCESS_KEY_ID`
is used.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
profile:
description: Profile is the named AWS profile used
to authenticate.
type: string
region:
description: Region is the AWS region. If blank, the
region from the default credentials chain used.
type: string
roleArn:
description: RoleArn is the named AWS profile used
to authenticate.
type: string
secretKey:
description: SecretKey is the AWS API secret. If not
specified, the environment variable `AWS_SECRET_ACCESS_KEY`
is used.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
subject:
description: Subject line when the message is delivered
to email endpoints.
type: string
targetARN:
description: The mobile platform endpoint ARN if message
is delivered via mobile notifications. If you don't
specify this value, you must specify a value for the
topic_arn or PhoneNumber.
type: string
topicARN:
description: SNS topic ARN, i.e. arn:aws:sns:us-east-2:698519295917:My-Topic
If you don't specify this value, you must specify a
value for the PhoneNumber or TargetARN.
type: string
type: object
type: array
telegramConfigs:
description: List of Telegram configurations.
items:
description: TelegramConfig configures notifications via Telegram.
See https://prometheus.io/docs/alerting/latest/configuration/#telegram_config
properties:
apiURL:
description: The Telegram API URL i.e. https://api.telegram.org.
If not specified, default API URL will be used.
type: string
botToken:
description: "Telegram bot token. It is mutually exclusive
with `botTokenFile`. The secret needs to be in the same
namespace as the AlertmanagerConfig object and accessible
by the Prometheus Operator. \n Either `botToken` or
`botTokenFile` is required."
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
botTokenFile:
description: "File to read the Telegram bot token from.
It is mutually exclusive with `botToken`. Either `botToken`
or `botTokenFile` is required. \n It requires Alertmanager
>= v0.26.0."
type: string
chatID:
description: The Telegram chat ID.
format: int64
type: integer
disableNotifications:
description: Disable telegram notifications
type: boolean
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: Message template
type: string
parseMode:
description: Parse mode for telegram message
enum:
- MarkdownV2
- Markdown
- HTML
type: string
sendResolved:
description: Whether to notify about resolved alerts.
type: boolean
type: object
type: array
victoropsConfigs:
description: List of VictorOps configurations.
items:
description: VictorOpsConfig configures notifications via
VictorOps. See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config
properties:
apiKey:
description: The secret's key that contains the API key
to use when talking to the VictorOps API. The secret
needs to be in the same namespace as the AlertmanagerConfig
object and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
apiUrl:
description: The VictorOps API URL.
type: string
customFields:
description: Additional custom fields for notification.
items:
description: KeyValue defines a (key, value) tuple.
properties:
key:
description: Key of the tuple.
minLength: 1
type: string
value:
description: Value of the tuple.
type: string
required:
- key
- value
type: object
type: array
entityDisplayName:
description: Contains summary of the alerted problem.
type: string
httpConfig:
description: The HTTP client's configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
messageType:
description: Describes the behavior of the alert (CRITICAL,
WARNING, INFO).
type: string
monitoringTool:
description: The monitoring tool the state message is
from.
type: string
routingKey:
description: A key used to map the alert to a team.
type: string
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
stateMessage:
description: Contains long explanation of the alerted
problem.
type: string
type: object
type: array
webexConfigs:
description: List of Webex configurations.
items:
description: WebexConfig configures notification via Cisco
Webex See https://prometheus.io/docs/alerting/latest/configuration/#webex_config
properties:
apiURL:
description: The Webex Teams API URL i.e. https://webexapis.com/v1/messages
Provide if different from the default API URL.
pattern: ^https?://.+$
type: string
httpConfig:
description: The HTTP client's configuration. You must
supply the bot token via the `httpConfig.authorization`
field.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: Message template
type: string
roomID:
description: ID of the Webex Teams room where to send
the messages.
minLength: 1
type: string
sendResolved:
description: Whether to notify about resolved alerts.
type: boolean
required:
- roomID
type: object
type: array
webhookConfigs:
description: List of webhook configurations.
items:
description: WebhookConfig configures notifications via a
generic receiver supporting the webhook payload. See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config
properties:
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
maxAlerts:
description: Maximum number of alerts to be sent per webhook
message. When 0, all alerts are included.
format: int32
minimum: 0
type: integer
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
url:
description: The URL to send HTTP POST requests to. `urlSecret`
takes precedence over `url`. One of `urlSecret` and
`url` should be defined.
type: string
urlSecret:
description: The secret's key that contains the webhook
URL to send HTTP requests to. `urlSecret` takes precedence
over `url`. One of `urlSecret` and `url` should be defined.
The secret needs to be in the same namespace as the
AlertmanagerConfig object and accessible by the Prometheus
Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
type: array
wechatConfigs:
description: List of WeChat configurations.
items:
description: WeChatConfig configures notifications via WeChat.
See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config
properties:
agentID:
type: string
apiSecret:
description: The secret's key that contains the WeChat
API key. The secret needs to be in the same namespace
as the AlertmanagerConfig object and accessible by the
Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
apiURL:
description: The WeChat API URL.
type: string
corpID:
description: The corp id for authentication.
type: string
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for
the client. This is mutually exclusive with BasicAuth
and is only available starting from Alertmanager
v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the
namespace that contains the credentials for
authentication.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type.
The value is case-insensitive. \n \"Basic\"
is not a supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined,
BasicAuth takes precedence.
properties:
password:
description: '`password` specifies a key of a
Secret containing the password for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a
Secret containing the username for authentication.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication.
The secret needs to be in the same namespace as
the AlertmanagerConfig object and accessible by
the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the
client should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch
a token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a
Secret or ConfigMap containing the OAuth2 client''s
ID.'
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of
a Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the
HTTP parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes
used for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to
fetch the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use
for the targets.
properties:
key:
description: The key of the secret to
select from. Must be a valid secret
key.
type: string
name:
description: 'Name of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key
file for the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the
targets.
type: string
type: object
type: object
message:
description: API request data as defined by the WeChat
API.
type: string
messageType:
type: string
sendResolved:
description: Whether or not to notify about resolved alerts.
type: boolean
toParty:
type: string
toTag:
type: string
toUser:
type: string
type: object
type: array
required:
- name
type: object
type: array
route:
description: The Alertmanager route definition for alerts matching
the resource's namespace. If present, it will be added to the generated
Alertmanager configuration as a first-level route.
properties:
activeTimeIntervals:
description: ActiveTimeIntervals is a list of MuteTimeInterval
names when this route should be active.
items:
type: string
type: array
continue:
description: Boolean indicating whether an alert should continue
matching subsequent sibling nodes. It will always be overridden
to true for the first-level route by the Prometheus operator.
type: boolean
groupBy:
description: List of labels to group by. Labels must not be repeated
(unique list). Special label "..." (aggregate by all possible
labels), if provided, must be the only element in the list.
items:
type: string
type: array
groupInterval:
description: 'How long to wait before sending an updated notification.
              Must match the regular expression `^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`
Example: "5m"'
type: string
groupWait:
description: 'How long to wait before sending the initial notification.
              Must match the regular expression `^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`
Example: "30s"'
type: string
matchers:
description: 'List of matchers that the alert''s labels should
match. For the first level route, the operator removes any existing
equality and regexp matcher on the `namespace` label and adds
              a `namespace: <object namespace>` matcher.'
items:
description: Matcher defines how to match on alert's labels.
properties:
matchType:
description: Match operation available with AlertManager
>= v0.22.0 and takes precedence over Regex (deprecated)
if non-empty.
enum:
- '!='
- =
- =~
- '!~'
type: string
name:
description: Label to match.
minLength: 1
type: string
regex:
description: 'Whether to match on equality (false) or regular-expression
(true). Deprecated: for AlertManager >= v0.22.0, `matchType`
should be used instead.'
type: boolean
value:
description: Label value to match.
type: string
required:
- name
type: object
type: array
muteTimeIntervals:
description: 'Note: this comment applies to the field definition
above but appears below otherwise it gets included in the generated
manifest. CRD schema doesn''t support self-referential types
for now (see https://github.com/kubernetes/kubernetes/issues/62872).
We have to use an alternative type to circumvent the limitation.
The downside is that the Kube API can''t validate the data beyond
the fact that it is a valid JSON representation. MuteTimeIntervals
is a list of MuteTimeInterval names that will mute this route
              when matched.'
items:
type: string
type: array
receiver:
description: Name of the receiver for this route. If not empty,
it should be listed in the `receivers` field.
type: string
repeatInterval:
description: 'How long to wait before repeating the last notification.
              Must match the regular expression `^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`
Example: "4h"'
type: string
routes:
description: Child routes.
items:
x-kubernetes-preserve-unknown-fields: true
type: array
type: object
type: object
required:
- spec
type: object
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: alertmanagers.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: Alertmanager
listKind: AlertmanagerList
plural: alertmanagers
shortNames:
- am
singular: alertmanager
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: The version of Alertmanager
jsonPath: .spec.version
name: Version
type: string
- description: The number of desired replicas
jsonPath: .spec.replicas
name: Replicas
type: integer
- description: The number of ready replicas
jsonPath: .status.availableReplicas
name: Ready
type: integer
- jsonPath: .status.conditions[?(@.type == 'Reconciled')].status
name: Reconciled
type: string
- jsonPath: .status.conditions[?(@.type == 'Available')].status
name: Available
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Whether the resource reconciliation is paused or not
jsonPath: .status.paused
name: Paused
priority: 1
type: boolean
name: v1
schema:
openAPIV3Schema:
description: Alertmanager describes an Alertmanager cluster.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'Specification of the desired behavior of the Alertmanager
cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
properties:
additionalPeers:
description: AdditionalPeers allows injecting a set of additional
Alertmanagers to peer with to form a highly available cluster.
items:
type: string
type: array
affinity:
description: If specified, the pod's scheduling constraints.
properties:
nodeAffinity:
description: Describes node affinity scheduling rules for the
pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the affinity expressions specified by
this field, but it may choose a node that violates one or
more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node matches
the corresponding matchExpressions; the node(s) with the
highest sum are the most preferred.
items:
description: An empty preferred scheduling term matches
all objects with implicit weight 0 (i.e. it's a no-op).
A null preferred scheduling term matches no objects (i.e.
is also a no-op).
properties:
preference:
description: A node selector term, associated with the
corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching the corresponding
nodeSelectorTerm, in the range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to an update), the system may or may not try to
eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms.
The terms are ORed.
items:
description: A null or empty node selector term matches
no objects. The requirements of them are ANDed. The
TopologySelectorTerm type implements a subset of the
NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g. co-locate
this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the affinity expressions specified by
this field, but it may choose a node that violates one or
more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node has
pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by this
field and the ones listed in the namespaces field.
null selector and null or empty namespaces list
means "this pod's namespace". An empty selector
({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to. The
term is applied to the union of the namespaces
listed in this field and the ones selected by
namespaceSelector. null or empty namespaces list
and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey
matches that of any node on which any of the selected
pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to a pod label update), the system may or may
not try to eventually evict the pod from its node. When
there are multiple elements, the lists of nodes corresponding
to each podAffinityTerm are intersected, i.e. all terms
must be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s))
that this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
                  on a node whose value of the label with key <topologyKey>
matches that of any node on which a pod of the set of
pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied to the
union of the namespaces selected by this field and
the ones listed in the namespaces field. null selector
and null or empty namespaces list means "this pod's
namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace
names that the term applies to. The term is applied
to the union of the namespaces listed in this field
and the ones selected by namespaceSelector. null or
empty namespaces list and null namespaceSelector means
"this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of
any node on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules (e.g.
avoid putting this pod in the same node, zone, etc. as some
other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the anti-affinity expressions specified
by this field, but it may choose a node that violates one
or more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node has
pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by this
field and the ones listed in the namespaces field.
null selector and null or empty namespaces list
means "this pod's namespace". An empty selector
({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to. The
term is applied to the union of the namespaces
listed in this field and the ones selected by
namespaceSelector. null or empty namespaces list
and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey
matches that of any node on which any of the selected
pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified by
this field are not met at scheduling time, the pod will
not be scheduled onto the node. If the anti-affinity requirements
specified by this field cease to be met at some point during
pod execution (e.g. due to a pod label update), the system
may or may not try to eventually evict the pod from its
node. When there are multiple elements, the lists of nodes
corresponding to each podAffinityTerm are intersected, i.e.
all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s))
that this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
                  on a node whose value of the label with key <topologyKey>
matches that of any node on which a pod of the set of
pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied to the
union of the namespaces selected by this field and
the ones listed in the namespaces field. null selector
and null or empty namespaces list means "this pod's
namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace
names that the term applies to. The term is applied
to the union of the namespaces listed in this field
and the ones selected by namespaceSelector. null or
empty namespaces list and null namespaceSelector means
"this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of
any node on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
alertmanagerConfigMatcherStrategy:
description: The AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig
objects match the alerts. In the future more options may be added.
properties:
type:
default: OnNamespace
description: If set to `OnNamespace`, the operator injects a label
matcher matching the namespace of the AlertmanagerConfig object
for all its routes and inhibition rules. `None` will not add
any additional matchers other than the ones specified in the
AlertmanagerConfig. Default is `OnNamespace`.
enum:
- OnNamespace
- None
type: string
type: object
alertmanagerConfigNamespaceSelector:
description: Namespaces to be selected for AlertmanagerConfig discovery.
If nil, only check own namespace.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
alertmanagerConfigSelector:
description: AlertmanagerConfigs to be selected for to merge and configure
Alertmanager with.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
alertmanagerConfiguration:
description: 'EXPERIMENTAL: alertmanagerConfiguration specifies the
configuration of Alertmanager. If defined, it takes precedence over
the `configSecret` field. This field may change in future releases.'
properties:
global:
description: Defines the global parameters of the Alertmanager
configuration.
properties:
httpConfig:
description: HTTP client configuration.
properties:
authorization:
description: Authorization header configuration for the
client. This is mutually exclusive with BasicAuth and
is only available starting from Alertmanager v0.22+.
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The
value is case-insensitive. \n \"Basic\" is not a
supported value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth for the client. This is mutually
exclusive with Authorization. If both are defined, BasicAuth
takes precedence.
properties:
password:
description: '`password` specifies a key of a Secret
containing the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret
containing the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: The secret's key that contains the bearer
token to be used by the client for authentication. The
secret needs to be in the same namespace as the Alertmanager
object and accessible by the Prometheus Operator.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
followRedirects:
description: FollowRedirects specifies whether the client
should follow HTTP 3xx redirects.
type: boolean
oauth2:
description: OAuth2 client credentials used to fetch a
token for the targets.
properties:
clientId:
description: '`clientId` specifies a key of a Secret
or ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use
for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for
the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a
Secret containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP
parameters to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used
for the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch
the token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyURL:
description: Optional proxy URL.
type: string
tlsConfig:
description: TLS configuration for the client.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to use
for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for
the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing
client-authentication.
properties:
configMap:
description: ConfigMap containing data to use
for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap
or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for
the targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file
for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
type: object
opsGenieApiKey:
description: The default OpsGenie API Key.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
opsGenieApiUrl:
description: The default OpsGenie API URL.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
pagerdutyUrl:
description: The default Pagerduty URL.
type: string
resolveTimeout:
description: ResolveTimeout is the default value used by alertmanager
if the alert does not include EndsAt, after this time passes
it can declare the alert as resolved if it has not been
updated. This has no impact on alerts from Prometheus, as
they always include EndsAt.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
slackApiUrl:
description: The default Slack API URL.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
smtp:
description: Configures global SMTP parameters.
properties:
authIdentity:
description: SMTP Auth using PLAIN
type: string
authPassword:
description: SMTP Auth using LOGIN and PLAIN.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
authSecret:
description: SMTP Auth using CRAM-MD5.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
authUsername:
description: SMTP Auth using CRAM-MD5, LOGIN and PLAIN.
If empty, Alertmanager doesn't authenticate to the SMTP
server.
type: string
from:
description: The default SMTP From header field.
type: string
hello:
description: The default hostname to identify to the SMTP
server.
type: string
requireTLS:
description: The default SMTP TLS requirement. Note that
Go does not support unencrypted connections to remote
SMTP endpoints.
type: boolean
smartHost:
description: The default SMTP smarthost used for sending
emails.
properties:
host:
description: Defines the host's address, it can be
a DNS name or a literal IP address.
minLength: 1
type: string
port:
description: Defines the host's port, it can be a
literal port number or a port name.
minLength: 1
type: string
required:
- host
- port
type: object
type: object
type: object
name:
description: The name of the AlertmanagerConfig resource which
is used to generate the Alertmanager configuration. It must
be defined in the same namespace as the Alertmanager object.
The operator will not enforce a `namespace` label for routes
and inhibition rules.
minLength: 1
type: string
templates:
description: Custom notification templates.
items:
description: SecretOrConfigMap allows to specify data as a Secret
or ConfigMap. Fields are mutually exclusive.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
type: array
type: object
automountServiceAccountToken:
description: 'AutomountServiceAccountToken indicates whether a service
account token should be automatically mounted in the pod. If the
service account has `automountServiceAccountToken: true`, set the
field to `false` to opt out of automounting API credentials.'
type: boolean
baseImage:
description: 'Base image that is used to deploy pods, without tag.
Deprecated: use ''image'' instead.'
type: string
clusterAdvertiseAddress:
description: 'ClusterAdvertiseAddress is the explicit address to advertise
in cluster. Needs to be provided for non RFC1918 [1] (public) addresses.
[1] RFC1918: https://tools.ietf.org/html/rfc1918'
type: string
clusterGossipInterval:
description: Interval between gossip attempts.
pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
clusterLabel:
description: Defines the identifier that uniquely identifies the Alertmanager
cluster. You should only set it when the Alertmanager cluster includes
Alertmanager instances which are external to this Alertmanager resource.
In practice, the addresses of the external instances are provided
via the `.spec.additionalPeers` field.
type: string
clusterPeerTimeout:
description: Timeout for cluster peering.
pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
clusterPushpullInterval:
description: Interval between pushpull attempts.
pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
configMaps:
description: ConfigMaps is a list of ConfigMaps in the same namespace
as the Alertmanager object, which shall be mounted into the Alertmanager
Pods. Each ConfigMap is added to the StatefulSet definition as a
volume named `configmap-`. The ConfigMaps are mounted
into `/etc/alertmanager/configmaps/` in the 'alertmanager'
container.
items:
type: string
type: array
configSecret:
description: "ConfigSecret is the name of a Kubernetes Secret in the
same namespace as the Alertmanager object, which contains the configuration
for this Alertmanager instance. If empty, it defaults to `alertmanager-`.
\n The Alertmanager configuration should be available under the
`alertmanager.yaml` key. Additional keys from the original secret
are copied to the generated secret and mounted into the `/etc/alertmanager/config`
directory in the `alertmanager` container. \n If either the secret
or the `alertmanager.yaml` key is missing, the operator provisions
a minimal Alertmanager configuration with one empty receiver (effectively
dropping alert notifications)."
type: string
containers:
description: 'Containers allows injecting additional containers. This
is meant to allow adding an authentication proxy to an Alertmanager
pod. Containers described here modify an operator generated container
if they share the same name and modifications are done via a strategic
merge patch. The current container names are: `alertmanager` and
`config-reloader`. Overriding containers is entirely outside the
scope of what the maintainers will support and by doing so, you
accept that this behaviour may break at any time without notice.'
items:
description: A single application container that you want to run
within a pod.
properties:
args:
description: 'Arguments to the entrypoint. The container image''s
CMD is used if this is not provided. Variable references $(VAR_NAME)
are expanded using the container''s environment. If a variable
cannot be resolved, the reference in the input string will
be unchanged. Double $$ are reduced to a single $, which allows
for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references
will never be expanded, regardless of whether the variable
exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
command:
description: 'Entrypoint array. Not executed within a shell.
The container image''s ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container''s
environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax:
i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether
the variable exists or not. Cannot be updated. More info:
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
env:
description: List of environment variables to set in the container.
Cannot be updated.
items:
description: EnvVar represents an environment variable present
in a Container.
properties:
name:
description: Name of the environment variable. Must be
a C_IDENTIFIER.
type: string
value:
description: 'Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in
the container and any service environment variables.
If a variable cannot be resolved, the reference in the
input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME)
syntax: i.e. "$$(VAR_NAME)" will produce the string
literal "$(VAR_NAME)". Escaped references will never
be expanded, regardless of whether the variable exists
or not. Defaults to "".'
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: 'Selects a field of the pod: supports
metadata.name, metadata.namespace, `metadata.labels['''']`,
`metadata.annotations['''']`, spec.nodeName,
spec.serviceAccountName, status.hostIP, status.podIP,
status.podIPs.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, limits.ephemeral-storage, requests.cpu,
requests.memory and requests.ephemeral-storage)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's
namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
envFrom:
description: List of sources to populate environment variables
in the container. The keys defined within a source must be
a C_IDENTIFIER. All invalid keys will be reported as an event
when the container is starting. When a key exists in multiple
sources, the value associated with the last source will take
precedence. Values defined by an Env with a duplicate key
will take precedence. Cannot be updated.
items:
description: EnvFromSource represents the source of a set
of ConfigMaps
properties:
configMapRef:
description: The ConfigMap to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap must be
defined
type: boolean
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each
key in the ConfigMap. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
type: object
type: array
image:
description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management
to default or override container images in workload controllers
like Deployments and StatefulSets.'
type: string
imagePullPolicy:
description: 'Image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent
otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
type: string
lifecycle:
description: Actions that the management system should take
in response to container lifecycle events. Cannot be updated.
properties:
postStart:
description: 'PostStart is called immediately after a container
is created. If the handler fails, the container is terminated
and restarted according to its restart policy. Other management
of the container blocks until the hook completes. More
info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
preStop:
description: 'PreStop is called immediately before a container
is terminated due to an API request or management event
such as liveness/startup probe failure, preemption, resource
contention, etc. The handler is not called if the container
crashes or exits. The Pod''s termination grace period
countdown begins before the PreStop hook is executed.
Regardless of the outcome of the handler, the container
will eventually terminate within the Pod''s termination
grace period (unless delayed by finalizers). Other management
of the container blocks until the hook completes or until
the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
type: object
livenessProbe:
description: 'Periodic probe of container liveness. Container
will be restarted if the probe fails. Cannot be updated. More
info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
name:
description: Name of the container specified as a DNS_LABEL.
Each container in a pod must have a unique name (DNS_LABEL).
Cannot be updated.
type: string
ports:
description: List of ports to expose from the container. Not
specifying a port here DOES NOT prevent that port from being
exposed. Any port which is listening on the default "0.0.0.0"
address inside a container will be accessible from the network.
Modifying this array with strategic merge patch may corrupt
the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255.
Cannot be updated.
items:
description: ContainerPort represents a network port in a
single container.
properties:
containerPort:
description: Number of port to expose on the pod's IP
address. This must be a valid port number, 0 < x < 65536.
format: int32
type: integer
hostIP:
description: What host IP to bind the external port to.
type: string
hostPort:
description: Number of port to expose on the host. If
specified, this must be a valid port number, 0 < x <
65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
format: int32
type: integer
name:
description: If specified, this must be an IANA_SVC_NAME
and unique within the pod. Each named port in a pod
must have a unique name. Name for the port that can
be referred to by services.
type: string
protocol:
default: TCP
description: Protocol for port. Must be UDP, TCP, or SCTP.
Defaults to "TCP".
type: string
required:
- containerPort
type: object
type: array
x-kubernetes-list-map-keys:
- containerPort
- protocol
x-kubernetes-list-type: map
readinessProbe:
description: 'Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe
fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
resizePolicy:
description: Resources resize policy for the container.
items:
description: ContainerResizePolicy represents resource resize
policy for the container.
properties:
resourceName:
description: 'Name of the resource to which this resource
resize policy applies. Supported values: cpu, memory.'
type: string
restartPolicy:
description: Restart policy to apply when specified resource
is resized. If not specified, it defaults to NotRequired.
type: string
required:
- resourceName
- restartPolicy
type: object
type: array
x-kubernetes-list-type: atomic
resources:
description: 'Compute Resources required by this container.
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only
be set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry
in pod.spec.resourceClaims of the Pod where this
field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
restartPolicy:
description: 'RestartPolicy defines the restart behavior of
individual containers in a pod. This field may only be set
for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
the restart behavior is defined by the Pod''s restart policy
and the container type. Setting the RestartPolicy as "Always"
for the init container will have the following effect: this
init container will be continually restarted on exit until
all regular containers have terminated. Once all regular containers
have completed, all init containers with restartPolicy "Always"
will be shut down. This lifecycle differs from normal init
containers and is often referred to as a "sidecar" container.
Although this init container still starts in the init container
sequence, it does not wait for the container to complete before
proceeding to the next init container. Instead, the next init
container starts immediately after this init container is
started, or after any startupProbe has successfully completed.'
type: string
securityContext:
description: 'SecurityContext defines the security options the
container should be run with. If set, the fields of SecurityContext
override the equivalent fields of PodSecurityContext. More
info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
properties:
allowPrivilegeEscalation:
description: 'AllowPrivilegeEscalation controls whether
a process can gain more privileges than its parent process.
This bool directly controls if the no_new_privs flag will
be set on the container process. AllowPrivilegeEscalation
is true always when the container is: 1) run as Privileged
2) has CAP_SYS_ADMIN Note that this field cannot be set
when spec.os.name is windows.'
type: boolean
capabilities:
description: The capabilities to add/drop when running containers.
Defaults to the default set of capabilities granted by
the container runtime. Note that this field cannot be
set when spec.os.name is windows.
properties:
add:
description: Added capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
drop:
description: Removed capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
type: object
privileged:
description: Run container in privileged mode. Processes
in privileged containers are essentially equivalent to
root on the host. Defaults to false. Note that this field
cannot be set when spec.os.name is windows.
type: boolean
procMount:
description: procMount denotes the type of proc mount to
use for the containers. The default is DefaultProcMount
which uses the container runtime defaults for readonly
paths and masked paths. This requires the ProcMountType
feature flag to be enabled. Note that this field cannot
be set when spec.os.name is windows.
type: string
readOnlyRootFilesystem:
description: Whether this container has a read-only root
filesystem. Default is false. Note that this field cannot
be set when spec.os.name is windows.
type: boolean
runAsGroup:
description: The GID to run the entrypoint of the container
process. Uses runtime default if unset. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a
non-root user. If true, the Kubelet will validate the
image at runtime to ensure that it does not run as UID
0 (root) and fail to start the container if it does. If
unset or false, no such validation will be performed.
May also be set in PodSecurityContext. If set in both
SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container
process. Defaults to user specified in image metadata
if unspecified. May also be set in PodSecurityContext. If
set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to the container.
If unspecified, the container runtime will allocate a
random SELinux context for each container. May also be
set in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies
to the container.
type: string
role:
description: Role is a SELinux role label that applies
to the container.
type: string
type:
description: Type is a SELinux type label that applies
to the container.
type: string
user:
description: User is a SELinux user label that applies
to the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by this container.
If seccomp options are provided at both the pod & container
level, the container options override the pod options.
Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile
must be preconfigured on the node to work. Must be
a descending path, relative to the kubelet's configured
seccomp profile location. Must be set if type is "Localhost".
Must NOT be set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost -
a profile defined in a file on the node should be
used. RuntimeDefault - the container runtime default
profile should be used. Unconfined - no profile should
be applied."
type: string
required:
- type
type: object
windowsOptions:
description: The Windows specific settings applied to all
containers. If unspecified, the options from the PodSecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence.
Note that this field cannot be set when spec.os.name is
linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named
by the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the
GMSA credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's
containers must have the same effective HostProcess
value (it is not allowed to have a mix of HostProcess
containers and non-HostProcess containers). In addition,
if HostProcess is true then HostNetwork must also
be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence.
type: string
type: object
type: object
startupProbe:
description: 'StartupProbe indicates that the Pod has successfully
initialized. If specified, no other probes are executed until
this completes successfully. If this probe fails, the Pod
will be restarted, just as if the livenessProbe failed. This
can be used to provide different probe parameters at the beginning
of a Pod''s lifecycle, when it might take a long time to load
data or warm a cache, than during steady-state operation.
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
stdin:
description: Whether this container should allocate a buffer
for stdin in the container runtime. If this is not set, reads
from stdin in the container will always result in EOF. Default
is false.
type: boolean
stdinOnce:
description: Whether the container runtime should close the
stdin channel after it has been opened by a single attach.
When stdin is true the stdin stream will remain open across
multiple attach sessions. If stdinOnce is set to true, stdin
is opened on container start, is empty until the first client
attaches to stdin, and then remains open and accepts data
until the client disconnects, at which time stdin is closed
and remains closed until the container is restarted. If this
flag is false, a container processes that reads from stdin
will never receive an EOF. Default is false
type: boolean
terminationMessagePath:
description: 'Optional: Path at which the file to which the
container''s termination message will be written is mounted
into the container''s filesystem. Message written is intended
to be brief final status, such as an assertion failure message.
Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited
to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
type: string
terminationMessagePolicy:
description: Indicate how the termination message should be
populated. File will use the contents of terminationMessagePath
to populate the container status message on both success and
failure. FallbackToLogsOnError will use the last chunk of
container log output if the termination message file is empty
and the container exited with an error. The log output is
limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
to File. Cannot be updated.
type: string
tty:
description: Whether this container should allocate a TTY for
itself, also requires 'stdin' to be true. Default is false.
type: boolean
volumeDevices:
description: volumeDevices is the list of block devices to be
used by the container.
items:
description: volumeDevice describes a mapping of a raw block
device within a container.
properties:
devicePath:
description: devicePath is the path inside of the container
that the device will be mapped to.
type: string
name:
description: name must match the name of a persistentVolumeClaim
in the pod
type: string
required:
- devicePath
- name
type: object
type: array
volumeMounts:
description: Pod volumes to mount into the container's filesystem.
Cannot be updated.
items:
description: VolumeMount describes a mounting of a Volume
within a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other
way around. When not set, MountPropagationNone is used.
This field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
workingDir:
description: Container's working directory. If not specified,
the container runtime's default will be used, which might
be configured in the container image. Cannot be updated.
type: string
required:
- name
type: object
type: array
externalUrl:
description: The external URL the Alertmanager instances will be available
under. This is necessary to generate correct URLs. This is necessary
if Alertmanager is not served from root of a DNS name.
type: string
forceEnableClusterMode:
description: ForceEnableClusterMode ensures Alertmanager does not
deactivate the cluster mode when running with a single replica.
Use case is e.g. spanning an Alertmanager cluster across Kubernetes
clusters with a single replica in each.
type: boolean
hostAliases:
description: Pods' hostAliases configuration
items:
description: HostAlias holds the mapping between IP and hostnames
that will be injected as an entry in the pod's hosts file.
properties:
hostnames:
description: Hostnames for the above IP address.
items:
type: string
type: array
ip:
description: IP address of the host file entry.
type: string
required:
- hostnames
- ip
type: object
type: array
x-kubernetes-list-map-keys:
- ip
x-kubernetes-list-type: map
image:
description: Image if specified has precedence over baseImage, tag
and sha combinations. Specifying the version is still necessary
to ensure the Prometheus Operator knows what version of Alertmanager
is being configured.
type: string
imagePullPolicy:
description: Image pull policy for the 'alertmanager', 'init-config-reloader'
and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
for more details.
enum:
- ""
- Always
- Never
- IfNotPresent
type: string
imagePullSecrets:
description: An optional list of references to secrets in the same
namespace to use for pulling prometheus and alertmanager images
from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
items:
description: LocalObjectReference contains enough information to
let you locate the referenced object inside the same namespace.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
type: array
initContainers:
description: 'InitContainers allows adding initContainers to the pod
definition. Those can be used to e.g. fetch secrets for injection
into the Alertmanager configuration from external sources. Any errors
during the execution of an initContainer will lead to a restart
of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
InitContainers described here modify an operator generated init
containers if they share the same name and modifications are done
via a strategic merge patch. The current init container name is:
`init-config-reloader`. Overriding init containers is entirely outside
the scope of what the maintainers will support and by doing so,
you accept that this behaviour may break at any time without notice.'
items:
description: A single application container that you want to run
within a pod.
properties:
args:
description: 'Arguments to the entrypoint. The container image''s
CMD is used if this is not provided. Variable references $(VAR_NAME)
are expanded using the container''s environment. If a variable
cannot be resolved, the reference in the input string will
be unchanged. Double $$ are reduced to a single $, which allows
for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references
will never be expanded, regardless of whether the variable
exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
command:
description: 'Entrypoint array. Not executed within a shell.
The container image''s ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container''s
environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax:
i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether
the variable exists or not. Cannot be updated. More info:
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
env:
description: List of environment variables to set in the container.
Cannot be updated.
items:
description: EnvVar represents an environment variable present
in a Container.
properties:
name:
description: Name of the environment variable. Must be
a C_IDENTIFIER.
type: string
value:
description: 'Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in
the container and any service environment variables.
If a variable cannot be resolved, the reference in the
input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME)
syntax: i.e. "$$(VAR_NAME)" will produce the string
literal "$(VAR_NAME)". Escaped references will never
be expanded, regardless of whether the variable exists
or not. Defaults to "".'
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: 'Selects a field of the pod: supports
metadata.name, metadata.namespace, `metadata.labels['''']`,
`metadata.annotations['''']`, spec.nodeName,
spec.serviceAccountName, status.hostIP, status.podIP,
status.podIPs.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, limits.ephemeral-storage, requests.cpu,
requests.memory and requests.ephemeral-storage)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's
namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
envFrom:
description: List of sources to populate environment variables
in the container. The keys defined within a source must be
a C_IDENTIFIER. All invalid keys will be reported as an event
when the container is starting. When a key exists in multiple
sources, the value associated with the last source will take
precedence. Values defined by an Env with a duplicate key
will take precedence. Cannot be updated.
items:
description: EnvFromSource represents the source of a set
of ConfigMaps
properties:
configMapRef:
description: The ConfigMap to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap must be
defined
type: boolean
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each
key in the ConfigMap. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
type: object
type: array
image:
description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management
to default or override container images in workload controllers
like Deployments and StatefulSets.'
type: string
imagePullPolicy:
description: 'Image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent
otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
type: string
lifecycle:
description: Actions that the management system should take
in response to container lifecycle events. Cannot be updated.
properties:
postStart:
description: 'PostStart is called immediately after a container
is created. If the handler fails, the container is terminated
and restarted according to its restart policy. Other management
of the container blocks until the hook completes. More
info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
preStop:
description: 'PreStop is called immediately before a container
is terminated due to an API request or management event
such as liveness/startup probe failure, preemption, resource
contention, etc. The handler is not called if the container
crashes or exits. The Pod''s termination grace period
countdown begins before the PreStop hook is executed.
Regardless of the outcome of the handler, the container
will eventually terminate within the Pod''s termination
grace period (unless delayed by finalizers). Other management
of the container blocks until the hook completes or until
the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
type: object
livenessProbe:
description: 'Periodic probe of container liveness. Container
will be restarted if the probe fails. Cannot be updated. More
info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
name:
description: Name of the container specified as a DNS_LABEL.
Each container in a pod must have a unique name (DNS_LABEL).
Cannot be updated.
type: string
ports:
description: List of ports to expose from the container. Not
specifying a port here DOES NOT prevent that port from being
exposed. Any port which is listening on the default "0.0.0.0"
address inside a container will be accessible from the network.
Modifying this array with strategic merge patch may corrupt
the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255.
Cannot be updated.
items:
description: ContainerPort represents a network port in a
single container.
properties:
containerPort:
description: Number of port to expose on the pod's IP
address. This must be a valid port number, 0 < x < 65536.
format: int32
type: integer
hostIP:
description: What host IP to bind the external port to.
type: string
hostPort:
description: Number of port to expose on the host. If
specified, this must be a valid port number, 0 < x <
65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
format: int32
type: integer
name:
description: If specified, this must be an IANA_SVC_NAME
and unique within the pod. Each named port in a pod
must have a unique name. Name for the port that can
be referred to by services.
type: string
protocol:
default: TCP
description: Protocol for port. Must be UDP, TCP, or SCTP.
Defaults to "TCP".
type: string
required:
- containerPort
type: object
type: array
x-kubernetes-list-map-keys:
- containerPort
- protocol
x-kubernetes-list-type: map
readinessProbe:
description: 'Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe
fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
resizePolicy:
description: Resources resize policy for the container.
items:
description: ContainerResizePolicy represents resource resize
policy for the container.
properties:
resourceName:
description: 'Name of the resource to which this resource
resize policy applies. Supported values: cpu, memory.'
type: string
restartPolicy:
description: Restart policy to apply when specified resource
is resized. If not specified, it defaults to NotRequired.
type: string
required:
- resourceName
- restartPolicy
type: object
type: array
x-kubernetes-list-type: atomic
resources:
description: 'Compute Resources required by this container.
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only
be set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry
in pod.spec.resourceClaims of the Pod where this
field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
restartPolicy:
description: 'RestartPolicy defines the restart behavior of
individual containers in a pod. This field may only be set
for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
the restart behavior is defined by the Pod''s restart policy
and the container type. Setting the RestartPolicy as "Always"
for the init container will have the following effect: this
init container will be continually restarted on exit until
all regular containers have terminated. Once all regular containers
have completed, all init containers with restartPolicy "Always"
will be shut down. This lifecycle differs from normal init
containers and is often referred to as a "sidecar" container.
Although this init container still starts in the init container
sequence, it does not wait for the container to complete before
proceeding to the next init container. Instead, the next init
container starts immediately after this init container is
started, or after any startupProbe has successfully completed.'
type: string
securityContext:
description: 'SecurityContext defines the security options the
container should be run with. If set, the fields of SecurityContext
override the equivalent fields of PodSecurityContext. More
info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
properties:
allowPrivilegeEscalation:
description: 'AllowPrivilegeEscalation controls whether
a process can gain more privileges than its parent process.
This bool directly controls if the no_new_privs flag will
be set on the container process. AllowPrivilegeEscalation
is true always when the container is: 1) run as Privileged
2) has CAP_SYS_ADMIN Note that this field cannot be set
when spec.os.name is windows.'
type: boolean
capabilities:
description: The capabilities to add/drop when running containers.
Defaults to the default set of capabilities granted by
the container runtime. Note that this field cannot be
set when spec.os.name is windows.
properties:
add:
description: Added capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
drop:
description: Removed capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
type: object
privileged:
description: Run container in privileged mode. Processes
in privileged containers are essentially equivalent to
root on the host. Defaults to false. Note that this field
cannot be set when spec.os.name is windows.
type: boolean
procMount:
description: procMount denotes the type of proc mount to
use for the containers. The default is DefaultProcMount
which uses the container runtime defaults for readonly
paths and masked paths. This requires the ProcMountType
feature flag to be enabled. Note that this field cannot
be set when spec.os.name is windows.
type: string
readOnlyRootFilesystem:
description: Whether this container has a read-only root
filesystem. Default is false. Note that this field cannot
be set when spec.os.name is windows.
type: boolean
runAsGroup:
description: The GID to run the entrypoint of the container
process. Uses runtime default if unset. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a
non-root user. If true, the Kubelet will validate the
image at runtime to ensure that it does not run as UID
0 (root) and fail to start the container if it does. If
unset or false, no such validation will be performed.
May also be set in PodSecurityContext. If set in both
SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container
process. Defaults to user specified in image metadata
if unspecified. May also be set in PodSecurityContext. If
set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to the container.
If unspecified, the container runtime will allocate a
random SELinux context for each container. May also be
set in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies
to the container.
type: string
role:
description: Role is a SELinux role label that applies
to the container.
type: string
type:
description: Type is a SELinux type label that applies
to the container.
type: string
user:
description: User is a SELinux user label that applies
to the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by this container.
If seccomp options are provided at both the pod & container
level, the container options override the pod options.
Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile
must be preconfigured on the node to work. Must be
a descending path, relative to the kubelet's configured
seccomp profile location. Must be set if type is "Localhost".
Must NOT be set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost -
a profile defined in a file on the node should be
used. RuntimeDefault - the container runtime default
profile should be used. Unconfined - no profile should
be applied."
type: string
required:
- type
type: object
windowsOptions:
description: The Windows specific settings applied to all
containers. If unspecified, the options from the PodSecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence.
Note that this field cannot be set when spec.os.name is
linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named
by the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the
GMSA credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's
containers must have the same effective HostProcess
value (it is not allowed to have a mix of HostProcess
containers and non-HostProcess containers). In addition,
if HostProcess is true then HostNetwork must also
be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence.
type: string
type: object
type: object
startupProbe:
description: 'StartupProbe indicates that the Pod has successfully
initialized. If specified, no other probes are executed until
this completes successfully. If this probe fails, the Pod
will be restarted, just as if the livenessProbe failed. This
can be used to provide different probe parameters at the beginning
of a Pod''s lifecycle, when it might take a long time to load
data or warm a cache, than during steady-state operation.
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
stdin:
description: Whether this container should allocate a buffer
for stdin in the container runtime. If this is not set, reads
from stdin in the container will always result in EOF. Default
is false.
type: boolean
stdinOnce:
description: Whether the container runtime should close the
stdin channel after it has been opened by a single attach.
When stdin is true the stdin stream will remain open across
multiple attach sessions. If stdinOnce is set to true, stdin
is opened on container start, is empty until the first client
attaches to stdin, and then remains open and accepts data
until the client disconnects, at which time stdin is closed
and remains closed until the container is restarted. If this
flag is false, a container process that reads from stdin
will never receive an EOF. Default is false
type: boolean
terminationMessagePath:
description: 'Optional: Path at which the file to which the
container''s termination message will be written is mounted
into the container''s filesystem. Message written is intended
to be brief final status, such as an assertion failure message.
Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited
to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
type: string
terminationMessagePolicy:
description: Indicate how the termination message should be
populated. File will use the contents of terminationMessagePath
to populate the container status message on both success and
failure. FallbackToLogsOnError will use the last chunk of
container log output if the termination message file is empty
and the container exited with an error. The log output is
limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
to File. Cannot be updated.
type: string
tty:
description: Whether this container should allocate a TTY for
itself, also requires 'stdin' to be true. Default is false.
type: boolean
volumeDevices:
description: volumeDevices is the list of block devices to be
used by the container.
items:
description: volumeDevice describes a mapping of a raw block
device within a container.
properties:
devicePath:
description: devicePath is the path inside of the container
that the device will be mapped to.
type: string
name:
description: name must match the name of a persistentVolumeClaim
in the pod
type: string
required:
- devicePath
- name
type: object
type: array
volumeMounts:
description: Pod volumes to mount into the container's filesystem.
Cannot be updated.
items:
description: VolumeMount describes a mounting of a Volume
within a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other
way around. When not set, MountPropagationNone is used.
This field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
workingDir:
description: Container's working directory. If not specified,
the container runtime's default will be used, which might
be configured in the container image. Cannot be updated.
type: string
required:
- name
type: object
type: array
listenLocal:
description: ListenLocal makes the Alertmanager server listen on loopback,
so that it does not bind against the Pod IP. Note this is only for
the Alertmanager UI, not the gossip communication.
type: boolean
logFormat:
description: Log format for Alertmanager to be configured with.
enum:
- ""
- logfmt
- json
type: string
logLevel:
description: Log level for Alertmanager to be configured with.
enum:
- ""
- debug
- info
- warn
- error
type: string
minReadySeconds:
description: Minimum number of seconds for which a newly created pod
should be ready without any of its container crashing for it to
be considered available. Defaults to 0 (pod will be considered available
as soon as it is ready) This is an alpha field from kubernetes 1.22
until 1.24 which requires enabling the StatefulSetMinReadySeconds
feature gate.
format: int32
type: integer
nodeSelector:
additionalProperties:
type: string
description: Define which Nodes the Pods are scheduled on.
type: object
paused:
description: If set to true all actions on the underlying managed
objects are not going to be performed, except for delete actions.
type: boolean
podMetadata:
description: "PodMetadata configures labels and annotations which
are propagated to the Alertmanager pods. \n The following items
are reserved and cannot be overridden: * \"alertmanager\" label,
set to the name of the Alertmanager instance. * \"app.kubernetes.io/instance\"
label, set to the name of the Alertmanager instance. * \"app.kubernetes.io/managed-by\"
label, set to \"prometheus-operator\". * \"app.kubernetes.io/name\"
label, set to \"alertmanager\". * \"app.kubernetes.io/version\"
label, set to the Alertmanager version. * \"kubectl.kubernetes.io/default-container\"
annotation, set to \"alertmanager\"."
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored
with a resource that may be set by external tools to store and
retrieve arbitrary metadata. They are not queryable and should
be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to
organize and categorize (scope and select) objects. May match
selectors of replication controllers and services. More info:
http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is required
when creating resources, although some resources may allow a
client to request the generation of an appropriate name automatically.
Name is primarily intended for creation idempotence and configuration
definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
type: object
portName:
default: web
description: Port name used for the pods and governing service. Defaults
to `web`.
type: string
priorityClassName:
description: Priority class assigned to the Pods
type: string
replicas:
description: Size is the expected size of the alertmanager cluster.
The controller will eventually make the size of the running cluster
equal to the expected size.
format: int32
type: integer
resources:
description: Define resources requests and limits for single Pods.
properties:
claims:
description: "Claims lists the names of resources, defined in
spec.resourceClaims, that are used by this container. \n This
is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only be set
for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry in pod.spec.resourceClaims
of the Pod where this field is used. It makes that resource
available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute resources
allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests cannot exceed Limits.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
retention:
default: 120h
description: Time duration Alertmanager shall retain data for. Default
is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)`
(milliseconds seconds minutes hours).
pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
routePrefix:
description: The route prefix Alertmanager registers HTTP handlers
for. This is useful, if using ExternalURL and a proxy is rewriting
HTTP routes of a request, and the actual ExternalURL is still true,
but the server serves requests under a different route prefix. For
example for use with `kubectl proxy`.
type: string
secrets:
description: Secrets is a list of Secrets in the same namespace as
the Alertmanager object, which shall be mounted into the Alertmanager
Pods. Each Secret is added to the StatefulSet definition as a volume
named `secret-`. The Secrets are mounted into `/etc/alertmanager/secrets/`
in the 'alertmanager' container.
items:
type: string
type: array
securityContext:
description: SecurityContext holds pod-level security attributes and
common container settings. This defaults to the default PodSecurityContext.
properties:
fsGroup:
description: "A special supplemental group that applies to all
containers in a pod. Some volume types allow the Kubelet to
change the ownership of that volume to be owned by the pod:
\n 1. The owning GID will be the FSGroup 2. The setgid bit is
set (new files created in the volume will be owned by FSGroup)
3. The permission bits are OR'd with rw-rw---- \n If unset,
the Kubelet will not modify the ownership and permissions of
any volume. Note that this field cannot be set when spec.os.name
is windows."
format: int64
type: integer
fsGroupChangePolicy:
description: 'fsGroupChangePolicy defines behavior of changing
ownership and permission of the volume before being exposed
inside Pod. This field will only apply to volume types which
support fsGroup based ownership(and permissions). It will have
no effect on ephemeral volume types such as: secret, configmaps
and emptydir. Valid values are "OnRootMismatch" and "Always".
If not specified, "Always" is used. Note that this field cannot
be set when spec.os.name is windows.'
type: string
runAsGroup:
description: The GID to run the entrypoint of the container process.
Uses runtime default if unset. May also be set in SecurityContext. If
set in both SecurityContext and PodSecurityContext, the value
specified in SecurityContext takes precedence for that container.
Note that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a non-root
user. If true, the Kubelet will validate the image at runtime
to ensure that it does not run as UID 0 (root) and fail to start
the container if it does. If unset or false, no such validation
will be performed. May also be set in SecurityContext. If set
in both SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container process.
Defaults to user specified in image metadata if unspecified.
May also be set in SecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence for that container. Note that this field cannot
be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to all containers.
If unspecified, the container runtime will allocate a random
SELinux context for each container. May also be set in SecurityContext. If
set in both SecurityContext and PodSecurityContext, the value
specified in SecurityContext takes precedence for that container.
Note that this field cannot be set when spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies to
the container.
type: string
role:
description: Role is a SELinux role label that applies to
the container.
type: string
type:
description: Type is a SELinux type label that applies to
the container.
type: string
user:
description: User is a SELinux user label that applies to
the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by the containers in this
pod. Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile must be
preconfigured on the node to work. Must be a descending
path, relative to the kubelet's configured seccomp profile
location. Must be set if type is "Localhost". Must NOT be
set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost - a profile
defined in a file on the node should be used. RuntimeDefault
- the container runtime default profile should be used.
Unconfined - no profile should be applied."
type: string
required:
- type
type: object
supplementalGroups:
description: A list of groups applied to the first process run
in each container, in addition to the container's primary GID,
the fsGroup (if specified), and group memberships defined in
the container image for the uid of the container process. If
unspecified, no additional groups are added to any container.
Note that group memberships defined in the container image for
the uid of the container process are still effective, even if
they are not included in this list. Note that this field cannot
be set when spec.os.name is windows.
items:
format: int64
type: integer
type: array
sysctls:
description: Sysctls hold a list of namespaced sysctls used for
the pod. Pods with unsupported sysctls (by the container runtime)
might fail to launch. Note that this field cannot be set when
spec.os.name is windows.
items:
description: Sysctl defines a kernel parameter to be set
properties:
name:
description: Name of a property to set
type: string
value:
description: Value of a property to set
type: string
required:
- name
- value
type: object
type: array
windowsOptions:
description: The Windows specific settings applied to all containers.
If unspecified, the options within a container's SecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named by
the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the GMSA
credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's containers
must have the same effective HostProcess value (it is not
allowed to have a mix of HostProcess containers and non-HostProcess
containers). In addition, if HostProcess is true then HostNetwork
must also be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set in PodSecurityContext.
If set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence.
type: string
type: object
type: object
serviceAccountName:
description: ServiceAccountName is the name of the ServiceAccount
to use to run the Prometheus Pods.
type: string
sha:
description: 'SHA of Alertmanager container image to be deployed.
Defaults to the value of `version`. Similar to a tag, but the SHA
explicitly deploys an immutable container image. Version and Tag
are ignored if SHA is set. Deprecated: use ''image'' instead. The
image digest can be specified as part of the image URL.'
type: string
storage:
description: Storage is the definition of how storage will be used
by the Alertmanager instances.
properties:
disableMountSubPath:
description: 'Deprecated: subPath usage will be removed in a future
release.'
type: boolean
emptyDir:
description: 'EmptyDirVolumeSource to be used by the StatefulSet.
If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`.
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir'
properties:
medium:
description: 'medium represents what type of storage medium
should back this directory. The default is "" which means
to use the node''s default medium. Must be an empty string
(default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
type: string
sizeLimit:
anyOf:
- type: integer
- type: string
description: 'sizeLimit is the total amount of local storage
required for this EmptyDir volume. The size limit is also
applicable for memory medium. The maximum usage on memory
medium EmptyDir would be the minimum value between the SizeLimit
specified here and the sum of memory limits of all containers
in a pod. The default is nil which means that the limit
is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
ephemeral:
description: 'EphemeralVolumeSource to be used by the StatefulSet.
This is a beta field in k8s 1.21 and GA in 1.23. For lower versions,
starting with k8s 1.19, it requires enabling the GenericEphemeralVolume
feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes'
properties:
volumeClaimTemplate:
description: "Will be used to create a stand-alone PVC to
provision the volume. The pod in which this EphemeralVolumeSource
is embedded will be the owner of the PVC, i.e. the PVC will
be deleted together with the pod. The name of the PVC will
be `<pod name>-<volume name>` where `<volume name>` is the
name from the `PodSpec.Volumes` array entry. Pod validation
will reject the pod if the concatenated name is not valid
for a PVC (for example, too long). \n An existing PVC with
that name that is not owned by the pod will *not* be used
for the pod to avoid using an unrelated volume by mistake.
Starting the pod is then blocked until the unrelated PVC
is removed. If such a pre-created PVC is meant to be used
by the pod, the PVC has to updated with an owner reference
to the pod once the pod exists. Normally this should not
be necessary, but it may be useful when manually reconstructing
a broken cluster. \n This field is read-only and no changes
will be made by Kubernetes to the PVC after it has been
created. \n Required, must not be nil."
properties:
metadata:
description: May contain labels and annotations that will
be copied into the PVC when creating it. No other fields
are allowed and will be rejected during validation.
type: object
spec:
description: The specification for the PersistentVolumeClaim.
The entire content is copied unchanged into the PVC
that gets created from this template. The same fields
as in a PersistentVolumeClaim are also valid here.
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the
provisioner or an external controller can support
the specified data source, it will create a new
volume based on the contents of the specified data
source. When the AnyVolumeDataSource feature gate
is enabled, dataSource contents will be copied to
dataSourceRef, and dataSourceRef contents will be
copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified,
then dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is
required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object from
which to populate the volume with data, if a non-empty
volume is desired. This may be any object from a
non-empty API group (non core object) or a PersistentVolumeClaim
object. When this field is specified, volume binding
will only succeed if the type of the specified object
matches some installed volume populator or dynamic
provisioner. This field will replace the functionality
of the dataSource field and as such if both fields
are non-empty, they must have the same value. For
backwards compatibility, when namespace isn''t specified
in dataSourceRef, both fields (dataSource and dataSourceRef)
will be set to the same value automatically if one
of them is empty and the other is non-empty. When
namespace is specified in dataSourceRef, dataSource
isn''t set to the same value and must be empty.
There are three important differences between dataSource
and dataSourceRef: * While dataSource only allows
two specific types of objects, dataSourceRef allows
any non-core object, as well as PersistentVolumeClaim
objects. * While dataSource ignores disallowed values
(dropping them), dataSourceRef preserves all values,
and generates an error if a disallowed value is
specified. * While dataSource only allows local
objects, dataSourceRef allows objects in any namespaces.
(Beta) Using this field requires the AnyVolumeDataSource
feature gate to be enabled. (Alpha) Using the namespace
field of dataSourceRef requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is
required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace
is specified, a gateway.networking.k8s.io/ReferenceGrant
object is required in the referent namespace
to allow that namespace's owner to accept the
reference. See the ReferenceGrant documentation
for details. (Alpha) This field requires the
CrossNamespaceVolumeDataSource feature gate
to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify
resource requirements that are lower than previous
value but must still be higher than capacity recorded
in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used
by this container. \n This is an alpha field
and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It
can only be set for containers."
items:
description: ResourceClaim references one entry
in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of
one entry in pod.spec.resourceClaims of
the Pod where this field is used. It makes
that resource available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes
to consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement is
a selector that contains values, a key, and
an operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If
the operator is Exists or DoesNotExist,
the values array must be empty. This array
is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the
StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume
is required by the claim. Value of Filesystem is
implied when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference to
the PersistentVolume backing this claim.
type: string
type: object
required:
- spec
type: object
type: object
volumeClaimTemplate:
description: Defines the PVC spec to be used by the Prometheus
StatefulSets. The easiest way to use a volume that cannot be
automatically provisioned is to use a label selector alongside
manually created PersistentVolumes.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST
resource this object represents. Servers may infer this
from the endpoint the client submits requests to. Cannot
be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
description: EmbeddedMetadata contains metadata relevant to
an EmbeddedResource.
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value
map stored with a resource that may be set by external
tools to store and retrieve arbitrary metadata. They
are not queryable and should be preserved when modifying
objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be
used to organize and categorize (scope and select) objects.
May match selectors of replication controllers and services.
More info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace.
Is required when creating resources, although some resources
may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation
idempotence and configuration definition. Cannot be
updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
type: object
spec:
description: 'Defines the desired characteristics of a volume
requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the provisioner
or an external controller can support the specified
data source, it will create a new volume based on the
contents of the specified data source. When the AnyVolumeDataSource
feature gate is enabled, dataSource contents will be
copied to dataSourceRef, and dataSourceRef contents
will be copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified, then
dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object from
which to populate the volume with data, if a non-empty
volume is desired. This may be any object from a non-empty
API group (non core object) or a PersistentVolumeClaim
object. When this field is specified, volume binding
will only succeed if the type of the specified object
matches some installed volume populator or dynamic provisioner.
This field will replace the functionality of the dataSource
field and as such if both fields are non-empty, they
must have the same value. For backwards compatibility,
when namespace isn''t specified in dataSourceRef, both
fields (dataSource and dataSourceRef) will be set to
the same value automatically if one of them is empty
and the other is non-empty. When namespace is specified
in dataSourceRef, dataSource isn''t set to the same
value and must be empty. There are three important differences
between dataSource and dataSourceRef: * While dataSource
only allows two specific types of objects, dataSourceRef
allows any non-core object, as well as PersistentVolumeClaim
objects. * While dataSource ignores disallowed values
(dropping them), dataSourceRef preserves all values,
and generates an error if a disallowed value is specified.
* While dataSource only allows local objects, dataSourceRef
allows objects in any namespaces. (Beta) Using this
field requires the AnyVolumeDataSource feature gate
to be enabled. (Alpha) Using the namespace field of
dataSourceRef requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace is specified,
a gateway.networking.k8s.io/ReferenceGrant object
is required in the referent namespace to allow that
namespace's owner to accept the reference. See the
ReferenceGrant documentation for details. (Alpha)
This field requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify resource
requirements that are lower than previous value but
must still be higher than capacity recorded in the status
field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used by
this container. \n This is an alpha field and requires
enabling the DynamicResourceAllocation feature gate.
\n This field is immutable. It can only be set for
containers."
items:
description: ResourceClaim references one entry
in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one
entry in pod.spec.resourceClaims of the Pod
where this field is used. It makes that resource
available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount
of compute resources required. If Requests is omitted
for a container, it defaults to Limits if that is
explicitly specified, otherwise to an implementation-defined
value. Requests cannot exceed Limits. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes to
consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values
array must be non-empty. If the operator is
Exists or DoesNotExist, the values array must
be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field
is "key", the operator is "In", and the values array
contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the StorageClass
required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume is
required by the claim. Value of Filesystem is implied
when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference to the
PersistentVolume backing this claim.
type: string
type: object
status:
description: 'Deprecated: this field is never set.'
properties:
accessModes:
description: 'accessModes contains the actual access modes
the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
allocatedResourceStatuses:
additionalProperties:
description: When a controller receives persistentvolume
claim update with ClaimResourceStatus for a resource
that it does not recognize, then it should ignore
that update and let other controllers handle it.
type: string
description: "allocatedResourceStatuses stores status
of resource being resized for the given PVC. Key names
follow standard Kubernetes label syntax. Valid values
are either: * Un-prefixed keys: - storage - the capacity
of the volume. * Custom resources must use implementation-defined
prefixed names such as \"example.com/my-custom-resource\"
Apart from above values - keys that are unprefixed or
have kubernetes.io prefix are considered reserved and
hence may not be used. \n ClaimResourceStatus can be
in any of following states: - ControllerResizeInProgress:
State set when resize controller starts resizing the
volume in control-plane. - ControllerResizeFailed: State
set when resize has failed in resize controller with
a terminal error. - NodeResizePending: State set when
resize controller has finished resizing the volume but
further resizing of volume is needed on the node. -
NodeResizeInProgress: State set when kubelet starts
resizing the volume. - NodeResizeFailed: State set when
resizing has failed in kubelet with a terminal error.
Transient errors don't set NodeResizeFailed. For example:
if expanding a PVC for more capacity - this field can
be one of the following states: - pvc.status.allocatedResourceStatus['storage']
= \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage']
= \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizeFailed\" When this field is not set, it
means that no resize operation is in progress for the
given PVC. \n A controller that receives PVC update
with previously unknown resourceName or ClaimResourceStatus
should ignore the update for the purpose it was designed.
For example - a controller that only is responsible
for resizing capacity of the volume, should ignore PVC
updates that change other valid resources associated
with PVC. \n This is an alpha field and requires enabling
RecoverVolumeExpansionFailure feature."
type: object
x-kubernetes-map-type: granular
allocatedResources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: "allocatedResources tracks the resources
allocated to a PVC including its capacity. Key names
follow standard Kubernetes label syntax. Valid values
are either: * Un-prefixed keys: - storage - the capacity
of the volume. * Custom resources must use implementation-defined
prefixed names such as \"example.com/my-custom-resource\"
Apart from above values - keys that are unprefixed or
have kubernetes.io prefix are considered reserved and
hence may not be used. \n Capacity reported here may
be larger than the actual capacity when a volume expansion
operation is requested. For storage quota, the larger
value from allocatedResources and PVC.spec.resources
is used. If allocatedResources is not set, PVC.spec.resources
alone is used for quota calculation. If a volume expansion
capacity request is lowered, allocatedResources is only
lowered if there are no expansion operations in progress
and if the actual volume capacity is equal or lower
than the requested capacity. \n A controller that receives
PVC update with previously unknown resourceName should
ignore the update for the purpose it was designed. For
example - a controller that only is responsible for
resizing capacity of the volume, should ignore PVC updates
that change other valid resources associated with PVC.
\n This is an alpha field and requires enabling RecoverVolumeExpansionFailure
feature."
type: object
capacity:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: capacity represents the actual resources
of the underlying volume.
type: object
conditions:
description: conditions is the current Condition of persistent
volume claim. If underlying persistent volume is being
resized then the Condition will be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
description: lastProbeTime is the time we probed
the condition.
format: date-time
type: string
lastTransitionTime:
description: lastTransitionTime is the time the
condition transitioned from one status to another.
format: date-time
type: string
message:
description: message is the human-readable message
indicating details about last transition.
type: string
reason:
description: reason is a unique, this should be
a short, machine understandable string that gives
the reason for condition's last transition. If
it reports "ResizeStarted" that means the underlying
persistent volume is being resized.
type: string
status:
type: string
type:
description: PersistentVolumeClaimConditionType
is a valid value of PersistentVolumeClaimCondition.Type
type: string
required:
- status
- type
type: object
type: array
phase:
description: phase represents the current phase of PersistentVolumeClaim.
type: string
type: object
type: object
type: object
tag:
description: 'Tag of Alertmanager container image to be deployed.
Defaults to the value of `version`. Version is ignored if Tag is
set. Deprecated: use ''image'' instead. The image tag can be specified
as part of the image URL.'
type: string
tolerations:
description: If specified, the pod's tolerations.
items:
description: The pod this Toleration is attached to tolerates any
taint that matches the triple <key,value,effect> using the matching
operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty
means match all taint effects. When specified, allowed values
are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies
to. Empty means match all taint keys. If the key is empty,
operator must be Exists; this combination means to match all
values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the
value. Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod
can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time
the toleration (which must be of effect NoExecute, otherwise
this field is ignored) tolerates the taint. By default, it
is not set, which means tolerate the taint forever (do not
evict). Zero and negative values will be treated as 0 (evict
immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches
to. If the operator is Exists, the value should be empty,
otherwise just a regular string.
type: string
type: object
type: array
topologySpreadConstraints:
description: If specified, the pod's topology spread constraints.
items:
description: TopologySpreadConstraint specifies how to spread matching
pods among the given topology.
properties:
labelSelector:
description: LabelSelector is used to find matching pods. Pods
that match this label selector are counted to determine the
number of pods in their corresponding topology domain.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
matchLabelKeys:
description: "MatchLabelKeys is a set of pod label keys to select
the pods over which spreading will be calculated. The keys
are used to lookup values from the incoming pod labels, those
key-value labels are ANDed with labelSelector to select the
group of existing pods over which spreading will be calculated
for the incoming pod. The same key is forbidden to exist in
both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot
be set when LabelSelector isn't set. Keys that don't exist
in the incoming pod labels will be ignored. A null or empty
list means only match against labelSelector. \n This is a
beta field and requires the MatchLabelKeysInPodTopologySpread
feature gate to be enabled (enabled by default)."
items:
type: string
type: array
x-kubernetes-list-type: atomic
maxSkew:
description: 'MaxSkew describes the degree to which pods may
be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`,
it is the maximum permitted difference between the number
of matching pods in the target topology and the global minimum.
The global minimum is the minimum number of matching pods
in an eligible domain or zero if the number of eligible domains
is less than MinDomains. For example, in a 3-zone cluster,
MaxSkew is set to 1, and pods with the same labelSelector
spread as 2/2/1: In this case, the global minimum is 1. |
zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew
is 1, incoming pod can only be scheduled to zone3 to become
2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1)
on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming
pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`,
it is used to give higher precedence to topologies that satisfy
it. It''s a required field. Default value is 1 and 0 is not
allowed.'
format: int32
type: integer
minDomains:
description: "MinDomains indicates a minimum number of eligible
domains. When the number of eligible domains with matching
topology keys is less than minDomains, Pod Topology Spread
treats \"global minimum\" as 0, and then the calculation of
Skew is performed. And when the number of eligible domains
with matching topology keys equals or greater than minDomains,
this value has no effect on scheduling. As a result, when
the number of eligible domains is less than minDomains, scheduler
won't schedule more than maxSkew Pods to those domains. If
value is nil, the constraint behaves as if MinDomains is equal
to 1. Valid values are integers greater than 0. When value
is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For
example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains
is set to 5 and pods with the same labelSelector spread as
2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P |
The number of domains is less than 5(MinDomains), so \"global
minimum\" is treated as 0. In this situation, new pod with
the same labelSelector cannot be scheduled, because computed
skew will be 3(3 - 0) if new Pod is scheduled to any of the
three zones, it will violate MaxSkew. \n This is a beta field
and requires the MinDomainsInPodTopologySpread feature gate
to be enabled (enabled by default)."
format: int32
type: integer
nodeAffinityPolicy:
description: "NodeAffinityPolicy indicates how we will treat
Pod's nodeAffinity/nodeSelector when calculating pod topology
spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector
are included in the calculations. - Ignore: nodeAffinity/nodeSelector
are ignored. All nodes are included in the calculations. \n
If this value is nil, the behavior is equivalent to the Honor
policy. This is a beta-level feature default enabled by the
NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
nodeTaintsPolicy:
description: "NodeTaintsPolicy indicates how we will treat node
taints when calculating pod topology spread skew. Options
are: - Honor: nodes without taints, along with tainted nodes
for which the incoming pod has a toleration, are included.
- Ignore: node taints are ignored. All nodes are included.
\n If this value is nil, the behavior is equivalent to the
Ignore policy. This is a beta-level feature default enabled
by the NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
topologyKey:
description: TopologyKey is the key of node labels. Nodes that
have a label with this key and identical values are considered
to be in the same topology. We consider each <key, value>
as a "bucket", and try to put balanced number of pods into
each bucket. We define a domain as a particular instance of
a topology. Also, we define an eligible domain as a domain
whose nodes meet the requirements of nodeAffinityPolicy and
nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname",
each Node is a domain of that topology. And, if TopologyKey
is "topology.kubernetes.io/zone", each zone is a domain of
that topology. It's a required field.
type: string
whenUnsatisfiable:
description: 'WhenUnsatisfiable indicates how to deal with a
pod if it doesn''t satisfy the spread constraint. - DoNotSchedule
(default) tells the scheduler not to schedule it. - ScheduleAnyway
tells the scheduler to schedule the pod in any location, but
giving higher precedence to topologies that would help reduce
the skew. A constraint is considered "Unsatisfiable" for an
incoming pod if and only if every possible node assignment
for that pod would violate "MaxSkew" on some topology. For
example, in a 3-zone cluster, MaxSkew is set to 1, and pods
with the same labelSelector spread as 3/1/1: | zone1 | zone2
| zone3 | | P P P | P | P | If WhenUnsatisfiable is
set to DoNotSchedule, incoming pod can only be scheduled to
zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on
zone2(zone3) satisfies MaxSkew(1). In other words, the cluster
can still be imbalanced, but scheduler won''t make it *more*
imbalanced. It''s a required field.'
type: string
required:
- maxSkew
- topologyKey
- whenUnsatisfiable
type: object
type: array
version:
description: Version the cluster should be on.
type: string
volumeMounts:
description: VolumeMounts allows configuration of additional VolumeMounts
on the output StatefulSet definition. VolumeMounts specified will
be appended to other VolumeMounts in the alertmanager container,
that are generated as a result of StorageSpec objects.
items:
description: VolumeMount describes a mounting of a Volume within
a container.
properties:
mountPath:
description: Path within the container at which the volume should
be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are propagated
from the host to container and the other way around. When
not set, MountPropagationNone is used. This field is beta
in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which the
container's volume should be mounted. Behaves similarly to
SubPath but environment variable references $(VAR_NAME) are
expanded using the container's environment. Defaults to ""
(volume's root). SubPathExpr and SubPath are mutually exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
volumes:
description: Volumes allows configuration of additional volumes on
the output StatefulSet definition. Volumes specified will be appended
to other volumes that are generated as a result of StorageSpec objects.
items:
description: Volume represents a named volume in a pod that may
be accessed by any container in the pod.
properties:
awsElasticBlockStore:
description: 'awsElasticBlockStore represents an AWS Disk resource
that is attached to a kubelet''s host machine and then exposed
to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
properties:
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
partition:
description: 'partition is the partition in the volume that
you want to mount. If omitted, the default is to mount
by volume name. Examples: For volume /dev/sda1, you specify
the partition as "1". Similarly, the volume partition
for /dev/sda is "0" (or you can leave the property empty).'
format: int32
type: integer
readOnly:
description: 'readOnly value true will force the readOnly
setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
type: boolean
volumeID:
description: 'volumeID is unique ID of the persistent disk
resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
type: string
required:
- volumeID
type: object
azureDisk:
description: azureDisk represents an Azure Data Disk mount on
the host and bind mount to the pod.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
Read Only, Read Write.'
type: string
diskName:
description: diskName is the Name of the data disk in the
blob storage
type: string
diskURI:
description: diskURI is the URI of data disk in the blob
storage
type: string
fsType:
description: fsType is Filesystem type to mount. Must be
a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
kind:
description: 'kind expected values are Shared: multiple
blob disks per storage account Dedicated: single blob
disk per storage account Managed: azure managed data
disk (only in managed availability set). defaults to shared'
type: string
readOnly:
description: readOnly Defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
required:
- diskName
- diskURI
type: object
azureFile:
description: azureFile represents an Azure File Service mount
on the host and bind mount to the pod.
properties:
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretName:
description: secretName is the name of secret that contains
Azure Storage Account Name and Key
type: string
shareName:
description: shareName is the azure share Name
type: string
required:
- secretName
- shareName
type: object
cephfs:
description: cephFS represents a Ceph FS mount on the host that
shares a pod's lifetime
properties:
monitors:
description: 'monitors is Required: Monitors is a collection
of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
items:
type: string
type: array
path:
description: 'path is Optional: Used as the mounted root,
rather than the full Ceph tree, default is /'
type: string
readOnly:
description: 'readOnly is Optional: Defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: boolean
secretFile:
description: 'secretFile is Optional: SecretFile is the
path to key ring for User, default is /etc/ceph/user.secret
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: string
secretRef:
description: 'secretRef is Optional: SecretRef is reference
to the authentication secret for User, default is empty.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
user:
description: 'user is optional: User is the rados user name,
default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: string
required:
- monitors
type: object
cinder:
description: 'cinder represents a cinder volume attached and
mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
properties:
fsType:
description: 'fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to
be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: string
readOnly:
description: 'readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: boolean
secretRef:
description: 'secretRef is optional: points to a secret
object containing parameters used to connect to OpenStack.'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
volumeID:
description: 'volumeID used to identify the volume in cinder.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: string
required:
- volumeID
type: object
configMap:
description: configMap represents a configMap that should populate
this volume
properties:
defaultMode:
description: 'defaultMode is optional: mode bits used to
set permissions on created files by default. Must be an
octal value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: items if unspecified, each key-value pair in
the Data field of the referenced ConfigMap will be projected
into the volume as a file whose name is the key and content
is the value. If specified, the listed keys will be projected
into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in
the ConfigMap, the volume setup will error unless it is
marked optional. Paths must be relative and may not contain
the '..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits used to
set permissions on this file. Must be an octal value
between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. If not
specified, the volume defaultMode will be used.
This might be in conflict with other options that
affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of the file
to map the key to. May not be an absolute path.
May not contain the path element '..'. May not start
with the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: optional specify whether the ConfigMap or its
keys must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
storage that is handled by certain external CSI drivers (Beta
feature).
properties:
driver:
description: driver is the name of the CSI driver that handles
this volume. Consult with your admin for the correct name
as registered in the cluster.
type: string
fsType:
description: fsType to mount. Ex. "ext4", "xfs", "ntfs".
If not provided, the empty value is passed to the associated
CSI driver which will determine the default filesystem
to apply.
type: string
nodePublishSecretRef:
description: nodePublishSecretRef is a reference to the
secret object containing sensitive information to pass
to the CSI driver to complete the CSI NodePublishVolume
and NodeUnpublishVolume calls. This field is optional,
and may be empty if no secret is required. If the secret
object contains more than one secret, all secret references
are passed.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
readOnly:
description: readOnly specifies a read-only configuration
for the volume. Defaults to false (read/write).
type: boolean
volumeAttributes:
additionalProperties:
type: string
description: volumeAttributes stores driver-specific properties
that are passed to the CSI driver. Consult your driver's
documentation for supported values.
type: object
required:
- driver
type: object
downwardAPI:
description: downwardAPI represents downward API about the pod
that should populate this volume
properties:
defaultMode:
description: 'Optional: mode bits to use on created files
by default. Must be a Optional: mode bits used to set
permissions on created files by default. Must be an octal
value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: Items is a list of downward API volume file
items:
description: DownwardAPIVolumeFile represents information
to create the file containing the pod field
properties:
fieldRef:
description: 'Required: Selects a field of the pod:
only annotations, labels, name and namespace are
supported.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
mode:
description: 'Optional: mode bits used to set permissions
on this file, must be an octal value between 0000
and 0777 or a decimal value between 0 and 511. YAML
accepts both octal and decimal values, JSON requires
decimal values for mode bits. If not specified,
the volume defaultMode will be used. This might
be in conflict with other options that affect the
file mode, like fsGroup, and the result can be other
mode bits set.'
format: int32
type: integer
path:
description: 'Required: Path is the relative path
name of the file to be created. Must not be absolute
or contain the ''..'' path. Must be utf-8 encoded.
The first item of the relative path must not start
with ''..'''
type: string
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, requests.cpu and requests.memory)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
required:
- path
type: object
type: array
type: object
emptyDir:
description: 'emptyDir represents a temporary directory that
shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
properties:
medium:
description: 'medium represents what type of storage medium
should back this directory. The default is "" which means
to use the node''s default medium. Must be an empty string
(default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
type: string
sizeLimit:
anyOf:
- type: integer
- type: string
description: 'sizeLimit is the total amount of local storage
required for this EmptyDir volume. The size limit is also
applicable for memory medium. The maximum usage on memory
medium EmptyDir would be the minimum value between the
SizeLimit specified here and the sum of memory limits
of all containers in a pod. The default is nil which means
that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
ephemeral:
description: "ephemeral represents a volume that is handled
by a cluster storage driver. The volume's lifecycle is tied
to the pod that defines it - it will be created before the
pod starts, and deleted when the pod is removed. \n Use this
if: a) the volume is only needed while the pod runs, b) features
of normal volumes like restoring from snapshot or capacity
tracking are needed, c) the storage driver is specified through
a storage class, and d) the storage driver supports dynamic
volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource
for more information on the connection between this volume
type and PersistentVolumeClaim). \n Use PersistentVolumeClaim
or one of the vendor-specific APIs for volumes that persist
for longer than the lifecycle of an individual pod. \n Use
CSI for light-weight local ephemeral volumes if the CSI driver
is meant to be used that way - see the documentation of the
driver for more information. \n A pod can use both types of
ephemeral volumes and persistent volumes at the same time."
properties:
volumeClaimTemplate:
description: "Will be used to create a stand-alone PVC to
provision the volume. The pod in which this EphemeralVolumeSource
is embedded will be the owner of the PVC, i.e. the PVC
will be deleted together with the pod. The name of the
PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry.
Pod validation will reject the pod if the concatenated
name is not valid for a PVC (for example, too long). \n
An existing PVC with that name that is not owned by the
pod will *not* be used for the pod to avoid using an unrelated
volume by mistake. Starting the pod is then blocked until
the unrelated PVC is removed. If such a pre-created PVC
is meant to be used by the pod, the PVC has to updated
with an owner reference to the pod once the pod exists.
Normally this should not be necessary, but it may be useful
when manually reconstructing a broken cluster. \n This
field is read-only and no changes will be made by Kubernetes
to the PVC after it has been created. \n Required, must
not be nil."
properties:
metadata:
description: May contain labels and annotations that
will be copied into the PVC when creating it. No other
fields are allowed and will be rejected during validation.
type: object
spec:
description: The specification for the PersistentVolumeClaim.
The entire content is copied unchanged into the PVC
that gets created from this template. The same fields
as in a PersistentVolumeClaim are also valid here.
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the
provisioner or an external controller can support
the specified data source, it will create a new
volume based on the contents of the specified
data source. When the AnyVolumeDataSource feature
gate is enabled, dataSource contents will be copied
to dataSourceRef, and dataSourceRef contents will
be copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified,
then dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API
group. For any other third-party types, APIGroup
is required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object
from which to populate the volume with data, if
a non-empty volume is desired. This may be any
object from a non-empty API group (non core object)
or a PersistentVolumeClaim object. When this field
is specified, volume binding will only succeed
if the type of the specified object matches some
installed volume populator or dynamic provisioner.
This field will replace the functionality of the
dataSource field and as such if both fields are
non-empty, they must have the same value. For
backwards compatibility, when namespace isn''t
specified in dataSourceRef, both fields (dataSource
and dataSourceRef) will be set to the same value
automatically if one of them is empty and the
other is non-empty. When namespace is specified
in dataSourceRef, dataSource isn''t set to the
same value and must be empty. There are three
important differences between dataSource and dataSourceRef:
* While dataSource only allows two specific types
of objects, dataSourceRef allows any non-core
object, as well as PersistentVolumeClaim objects.
* While dataSource ignores disallowed values (dropping
them), dataSourceRef preserves all values, and
generates an error if a disallowed value is specified.
* While dataSource only allows local objects,
dataSourceRef allows objects in any namespaces.
(Beta) Using this field requires the AnyVolumeDataSource
feature gate to be enabled. (Alpha) Using the
namespace field of dataSourceRef requires the
CrossNamespaceVolumeDataSource feature gate to
be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API
group. For any other third-party types, APIGroup
is required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace
is specified, a gateway.networking.k8s.io/ReferenceGrant
object is required in the referent namespace
to allow that namespace's owner to accept
the reference. See the ReferenceGrant documentation
for details. (Alpha) This field requires the
CrossNamespaceVolumeDataSource feature gate
to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify
resource requirements that are lower than previous
value but must still be higher than capacity recorded
in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used
by this container. \n This is an alpha field
and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable.
It can only be set for containers."
items:
description: ResourceClaim references one
entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name
of one entry in pod.spec.resourceClaims
of the Pod where this field is used.
It makes that resource available inside
a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum
amount of compute resources required. If Requests
is omitted for a container, it defaults to
Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes
to consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the
StorageClass required by the claim. More info:
https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume
is required by the claim. Value of Filesystem
is implied when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference
to the PersistentVolume backing this claim.
type: string
type: object
required:
- spec
type: object
type: object
fc:
description: fc represents a Fibre Channel resource that is
attached to a kubelet's host machine and then exposed to the
pod.
properties:
fsType:
description: 'fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. TODO: how do we prevent errors in the
filesystem from compromising the machine'
type: string
lun:
description: 'lun is Optional: FC target lun number'
format: int32
type: integer
readOnly:
description: 'readOnly is Optional: Defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.'
type: boolean
targetWWNs:
description: 'targetWWNs is Optional: FC target worldwide
names (WWNs)'
items:
type: string
type: array
wwids:
description: 'wwids Optional: FC volume world wide identifiers
(wwids) Either wwids or combination of targetWWNs and
lun must be set, but not both simultaneously.'
items:
type: string
type: array
type: object
flexVolume:
description: flexVolume represents a generic volume resource
that is provisioned/attached using an exec based plugin.
properties:
driver:
description: driver is the name of the driver to use for
this volume.
type: string
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". The default filesystem depends
on FlexVolume script.
type: string
options:
additionalProperties:
type: string
description: 'options is Optional: this field holds extra
command options if any.'
type: object
readOnly:
description: 'readOnly is Optional: defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.'
type: boolean
secretRef:
description: 'secretRef is Optional: secretRef is reference
to the secret object containing sensitive information
to pass to the plugin scripts. This may be empty if no
secret object is specified. If the secret object contains
more than one secret, all secrets are passed to the plugin
scripts.'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
required:
- driver
type: object
flocker:
description: flocker represents a Flocker volume attached to
a kubelet's host machine. This depends on the Flocker control
service being running
properties:
datasetName:
description: datasetName is Name of the dataset stored as
metadata -> name on the dataset for Flocker should be
considered as deprecated
type: string
datasetUUID:
description: datasetUUID is the UUID of the dataset. This
is unique identifier of a Flocker dataset
type: string
type: object
gcePersistentDisk:
description: 'gcePersistentDisk represents a GCE Disk resource
that is attached to a kubelet''s host machine and then exposed
to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
properties:
fsType:
description: 'fsType is filesystem type of the volume that
you want to mount. Tip: Ensure that the filesystem type
is supported by the host operating system. Examples: "ext4",
"xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
partition:
description: 'partition is the partition in the volume that
you want to mount. If omitted, the default is to mount
by volume name. Examples: For volume /dev/sda1, you specify
the partition as "1". Similarly, the volume partition
for /dev/sda is "0" (or you can leave the property empty).
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
format: int32
type: integer
pdName:
description: 'pdName is unique name of the PD resource in
GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
type: string
readOnly:
description: 'readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
type: boolean
required:
- pdName
type: object
gitRepo:
description: 'gitRepo represents a git repository at a particular
revision. DEPRECATED: GitRepo is deprecated. To provision
a container with a git repo, mount an EmptyDir into an InitContainer
that clones the repo using git, then mount the EmptyDir into
the Pod''s container.'
properties:
directory:
description: directory is the target directory name. Must
not contain or start with '..'. If '.' is supplied, the
volume directory will be the git repository. Otherwise,
if specified, the volume will contain the git repository
in the subdirectory with the given name.
type: string
repository:
description: repository is the URL
type: string
revision:
description: revision is the commit hash for the specified
revision.
type: string
required:
- repository
type: object
glusterfs:
description: 'glusterfs represents a Glusterfs mount on the
host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md'
properties:
endpoints:
description: 'endpoints is the endpoint name that details
Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: string
path:
description: 'path is the Glusterfs volume path. More info:
https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: string
readOnly:
description: 'readOnly here will force the Glusterfs volume
to be mounted with read-only permissions. Defaults to
false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: boolean
required:
- endpoints
- path
type: object
hostPath:
description: 'hostPath represents a pre-existing file or directory
on the host machine that is directly exposed to the container.
This is generally used for system agents or other privileged
things that are allowed to see the host machine. Most containers
will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
--- TODO(jonesdl) We need to restrict who can use host directory
mounts and who can/can not mount host directories as read/write.'
properties:
path:
description: 'path of the directory on the host. If the
path is a symlink, it will follow the link to the real
path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
type: string
type:
description: 'type for HostPath Volume Defaults to "" More
info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
type: string
required:
- path
type: object
iscsi:
description: 'iscsi represents an ISCSI Disk resource that is
attached to a kubelet''s host machine and then exposed to
the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md'
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether support iSCSI
Discovery CHAP authentication
type: boolean
chapAuthSession:
description: chapAuthSession defines whether support iSCSI
Session CHAP authentication
type: boolean
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
initiatorName:
description: initiatorName is the custom iSCSI Initiator
Name. If initiatorName is specified with iscsiInterface
simultaneously, new iSCSI interface : will be created for the connection.
type: string
iqn:
description: iqn is the target iSCSI Qualified Name.
type: string
iscsiInterface:
description: iscsiInterface is the interface Name that uses
an iSCSI transport. Defaults to 'default' (tcp).
type: string
lun:
description: lun represents iSCSI Target Lun number.
format: int32
type: integer
portals:
description: portals is the iSCSI Target Portal List. The
portal is either an IP or ip_addr:port if the port is
other than default (typically TCP ports 860 and 3260).
items:
type: string
type: array
readOnly:
description: readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false.
type: boolean
secretRef:
description: secretRef is the CHAP Secret for iSCSI target
and initiator authentication
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
targetPortal:
description: targetPortal is iSCSI Target Portal. The Portal
is either an IP or ip_addr:port if the port is other than
default (typically TCP ports 860 and 3260).
type: string
required:
- iqn
- lun
- targetPortal
type: object
name:
description: 'name of the volume. Must be a DNS_LABEL and unique
within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
nfs:
description: 'nfs represents an NFS mount on the host that shares
a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
properties:
path:
description: 'path that is exported by the NFS server. More
info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: string
readOnly:
description: 'readOnly here will force the NFS export to
be mounted with read-only permissions. Defaults to false.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: boolean
server:
description: 'server is the hostname or IP address of the
NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: string
required:
- path
- server
type: object
persistentVolumeClaim:
description: 'persistentVolumeClaimVolumeSource represents a
reference to a PersistentVolumeClaim in the same namespace.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
properties:
claimName:
description: 'claimName is the name of a PersistentVolumeClaim
in the same namespace as the pod using this volume. More
info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
type: string
readOnly:
description: readOnly Will force the ReadOnly setting in
VolumeMounts. Default false.
type: boolean
required:
- claimName
type: object
photonPersistentDisk:
description: photonPersistentDisk represents a PhotonController
persistent disk attached and mounted on kubelets host machine
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
pdID:
description: pdID is the ID that identifies Photon Controller
persistent disk
type: string
required:
- pdID
type: object
portworxVolume:
description: portworxVolume represents a portworx volume attached
and mounted on kubelets host machine
properties:
fsType:
description: fSType represents the filesystem type to mount
Must be a filesystem type supported by the host operating
system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
volumeID:
description: volumeID uniquely identifies a Portworx volume
type: string
required:
- volumeID
type: object
projected:
description: projected items for all in one resources secrets,
configmaps, and downward API
properties:
defaultMode:
description: defaultMode are the mode bits used to set permissions
on created files by default. Must be an octal value between
0000 and 0777 or a decimal value between 0 and 511. YAML
accepts both octal and decimal values, JSON requires decimal
values for mode bits. Directories within the path are
not affected by this setting. This might be in conflict
with other options that affect the file mode, like fsGroup,
and the result can be other mode bits set.
format: int32
type: integer
sources:
description: sources is the list of volume projections
items:
description: Projection that may be projected along with
other supported volume types
properties:
configMap:
description: configMap information about the configMap
data to project
properties:
items:
description: items if unspecified, each key-value
pair in the Data field of the referenced ConfigMap
will be projected into the volume as a file
whose name is the key and content is the value.
If specified, the listed keys will be projected
into the specified paths, and unlisted keys
will not be present. If a key is specified which
is not present in the ConfigMap, the volume
setup will error unless it is marked optional.
Paths must be relative and may not contain the
'..' path or start with '..'.
items:
description: Maps a string key to a path within
a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits
used to set permissions on this file.
Must be an octal value between 0000 and
0777 or a decimal value between 0 and
511. YAML accepts both octal and decimal
values, JSON requires decimal values for
mode bits. If not specified, the volume
defaultMode will be used. This might be
in conflict with other options that affect
the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of
the file to map the key to. May not be
an absolute path. May not contain the
path element '..'. May not start with
the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: optional specify whether the ConfigMap
or its keys must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
downwardAPI:
description: downwardAPI information about the downwardAPI
data to project
properties:
items:
description: Items is a list of DownwardAPIVolume
file
items:
description: DownwardAPIVolumeFile represents
information to create the file containing
the pod field
properties:
fieldRef:
description: 'Required: Selects a field
of the pod: only annotations, labels,
name and namespace are supported.'
properties:
apiVersion:
description: Version of the schema the
FieldPath is written in terms of,
defaults to "v1".
type: string
fieldPath:
description: Path of the field to select
in the specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
mode:
description: 'Optional: mode bits used to
set permissions on this file, must be
an octal value between 0000 and 0777 or
a decimal value between 0 and 511. YAML
accepts both octal and decimal values,
JSON requires decimal values for mode
bits. If not specified, the volume defaultMode
will be used. This might be in conflict
with other options that affect the file
mode, like fsGroup, and the result can
be other mode bits set.'
format: int32
type: integer
path:
description: 'Required: Path is the relative
path name of the file to be created. Must
not be absolute or contain the ''..''
path. Must be utf-8 encoded. The first
item of the relative path must not start
with ''..'''
type: string
resourceFieldRef:
description: 'Selects a resource of the
container: only resources limits and requests
(limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
properties:
containerName:
description: 'Container name: required
for volumes, optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format
of the exposed resources, defaults
to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to
select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
required:
- path
type: object
type: array
type: object
secret:
description: secret information about the secret data
to project
properties:
items:
description: items if unspecified, each key-value
pair in the Data field of the referenced Secret
will be projected into the volume as a file
whose name is the key and content is the value.
If specified, the listed keys will be projected
into the specified paths, and unlisted keys
will not be present. If a key is specified which
is not present in the Secret, the volume setup
will error unless it is marked optional. Paths
must be relative and may not contain the '..'
path or start with '..'.
items:
description: Maps a string key to a path within
a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits
used to set permissions on this file.
Must be an octal value between 0000 and
0777 or a decimal value between 0 and
511. YAML accepts both octal and decimal
values, JSON requires decimal values for
mode bits. If not specified, the volume
defaultMode will be used. This might be
in conflict with other options that affect
the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of
the file to map the key to. May not be
an absolute path. May not contain the
path element '..'. May not start with
the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: optional field specify whether the
Secret or its key must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
serviceAccountToken:
description: serviceAccountToken is information about
the serviceAccountToken data to project
properties:
audience:
description: audience is the intended audience
of the token. A recipient of a token must identify
itself with an identifier specified in the audience
of the token, and otherwise should reject the
token. The audience defaults to the identifier
of the apiserver.
type: string
expirationSeconds:
description: expirationSeconds is the requested
duration of validity of the service account
token. As the token approaches expiration, the
kubelet volume plugin will proactively rotate
the service account token. The kubelet will
start trying to rotate the token if the token
is older than 80 percent of its time to live
or if the token is older than 24 hours. Defaults
to 1 hour and must be at least 10 minutes.
format: int64
type: integer
path:
description: path is the path relative to the
mount point of the file to project the token
into.
type: string
required:
- path
type: object
type: object
type: array
type: object
quobyte:
description: quobyte represents a Quobyte mount on the host
that shares a pod's lifetime
properties:
group:
description: group to map volume access to Default is no
group
type: string
readOnly:
description: readOnly here will force the Quobyte volume
to be mounted with read-only permissions. Defaults to
false.
type: boolean
registry:
description: registry represents a single or multiple Quobyte
Registry services specified as a string as host:port pair
(multiple entries are separated with commas) which acts
as the central registry for volumes
type: string
tenant:
description: tenant owning the given Quobyte volume in the
Backend Used with dynamically provisioned Quobyte volumes,
value is set by the plugin
type: string
user:
description: user to map volume access to Defaults to serviceaccount
user
type: string
volume:
description: volume is a string that references an already
created Quobyte volume by name.
type: string
required:
- registry
- volume
type: object
rbd:
description: 'rbd represents a Rados Block Device mount on the
host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md'
properties:
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
image:
description: 'image is the rados image name. More info:
https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
keyring:
description: 'keyring is the path to key ring for RBDUser.
Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
monitors:
description: 'monitors is a collection of Ceph monitors.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
items:
type: string
type: array
pool:
description: 'pool is the rados pool name. Default is rbd.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
readOnly:
description: 'readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: boolean
secretRef:
description: 'secretRef is name of the authentication secret
for RBDUser. If provided overrides keyring. Default is
nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
user:
description: 'user is the rados user name. Default is admin.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
required:
- image
- monitors
type: object
scaleIO:
description: scaleIO represents a ScaleIO persistent volume
attached and mounted on Kubernetes nodes.
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Default is "xfs".
type: string
gateway:
description: gateway is the host address of the ScaleIO
API Gateway.
type: string
protectionDomain:
description: protectionDomain is the name of the ScaleIO
Protection Domain for the configured storage.
type: string
readOnly:
description: readOnly Defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretRef:
description: secretRef references to the secret for ScaleIO
user and other sensitive information. If this is not provided,
Login operation will fail.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
sslEnabled:
description: sslEnabled Flag enable/disable SSL communication
with Gateway, default false
type: boolean
storageMode:
description: storageMode indicates whether the storage for
a volume should be ThickProvisioned or ThinProvisioned.
Default is ThinProvisioned.
type: string
storagePool:
description: storagePool is the ScaleIO Storage Pool associated
with the protection domain.
type: string
system:
description: system is the name of the storage system as
configured in ScaleIO.
type: string
volumeName:
description: volumeName is the name of a volume already
created in the ScaleIO system that is associated with
this volume source.
type: string
required:
- gateway
- secretRef
- system
type: object
secret:
description: 'secret represents a secret that should populate
this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'
properties:
defaultMode:
description: 'defaultMode is Optional: mode bits used to
set permissions on created files by default. Must be an
octal value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: items If unspecified, each key-value pair in
the Data field of the referenced Secret will be projected
into the volume as a file whose name is the key and content
is the value. If specified, the listed keys will be projected
into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in
the Secret, the volume setup will error unless it is marked
optional. Paths must be relative and may not contain the
'..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits used to
set permissions on this file. Must be an octal value
between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. If not
specified, the volume defaultMode will be used.
This might be in conflict with other options that
affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of the file
to map the key to. May not be an absolute path.
May not contain the path element '..'. May not start
with the string '..'.
type: string
required:
- key
- path
type: object
type: array
optional:
description: optional field specify whether the Secret or
its keys must be defined
type: boolean
secretName:
description: 'secretName is the name of the secret in the
pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'
type: string
type: object
storageos:
description: storageOS represents a StorageOS volume attached
and mounted on Kubernetes nodes.
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretRef:
description: secretRef specifies the secret to use for obtaining
the StorageOS API credentials. If not specified, default
values will be attempted.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
volumeName:
description: volumeName is the human-readable name of the
StorageOS volume. Volume names are only unique within
a namespace.
type: string
volumeNamespace:
description: volumeNamespace specifies the scope of the
volume within StorageOS. If no namespace is specified
then the Pod's namespace will be used. This allows the
Kubernetes name scoping to be mirrored within StorageOS
for tighter integration. Set VolumeName to any name to
override the default behaviour. Set to "default" if you
are not using namespaces within StorageOS. Namespaces
that do not pre-exist within StorageOS will be created.
type: string
type: object
vsphereVolume:
description: vsphereVolume represents a vSphere volume attached
and mounted on kubelets host machine
properties:
fsType:
description: fsType is filesystem type to mount. Must be
a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
storagePolicyID:
description: storagePolicyID is the storage Policy Based
Management (SPBM) profile ID associated with the StoragePolicyName.
type: string
storagePolicyName:
description: storagePolicyName is the storage Policy Based
Management (SPBM) profile name.
type: string
volumePath:
description: volumePath is the path that identifies vSphere
volume vmdk
type: string
required:
- volumePath
type: object
required:
- name
type: object
type: array
web:
description: Defines the web command line flags when starting Alertmanager.
properties:
getConcurrency:
description: Maximum number of GET requests processed concurrently.
This corresponds to the Alertmanager's `--web.get-concurrency`
flag.
format: int32
type: integer
httpConfig:
description: Defines HTTP parameters for web server.
properties:
headers:
description: List of headers that can be added to HTTP responses.
properties:
contentSecurityPolicy:
description: Set the Content-Security-Policy header to
HTTP responses. Unset if blank.
type: string
strictTransportSecurity:
description: Set the Strict-Transport-Security header
to HTTP responses. Unset if blank. Please make sure
that you use this with care as this header might force
browsers to load Prometheus and the other applications
hosted on the same domain and subdomains over HTTPS.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
type: string
xContentTypeOptions:
description: Set the X-Content-Type-Options header to
HTTP responses. Unset if blank. Accepted value is nosniff.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
enum:
- ""
- NoSniff
type: string
xFrameOptions:
description: Set the X-Frame-Options header to HTTP responses.
Unset if blank. Accepted values are deny and sameorigin.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
enum:
- ""
- Deny
- SameOrigin
type: string
xXSSProtection:
description: Set the X-XSS-Protection header to all responses.
Unset if blank. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
type: string
type: object
http2:
description: Enable HTTP/2 support. Note that HTTP/2 is only
supported with TLS. When TLSConfig is not configured, HTTP/2
will be disabled. Whenever the value of the field changes,
a rolling update will be triggered.
type: boolean
type: object
timeout:
description: Timeout for HTTP requests. This corresponds to the
Alertmanager's `--web.timeout` flag.
format: int32
type: integer
tlsConfig:
description: Defines the TLS parameters for HTTPS.
properties:
cert:
description: Contains the TLS certificate for the server.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cipherSuites:
description: 'List of supported cipher suites for TLS versions
up to TLS 1.2. If empty, Go default cipher suites are used.
Available cipher suites are documented in the go documentation:
https://golang.org/pkg/crypto/tls/#pkg-constants'
items:
type: string
type: array
client_ca:
description: Contains the CA certificate for client certificate
authentication to the server.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientAuthType:
description: 'Server policy for client authentication. Maps
to ClientAuth Policies. For more detail on clientAuth options:
https://golang.org/pkg/crypto/tls/#ClientAuthType'
type: string
curvePreferences:
description: 'Elliptic curves that will be used in an ECDHE
handshake, in preference order. Available curves are documented
in the go documentation: https://golang.org/pkg/crypto/tls/#CurveID'
items:
type: string
type: array
keySecret:
description: Secret containing the TLS key for the server.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
maxVersion:
description: Maximum TLS version that is acceptable. Defaults
to TLS13.
type: string
minVersion:
description: Minimum TLS version that is acceptable. Defaults
to TLS12.
type: string
preferServerCipherSuites:
description: Controls whether the server selects the client's
most preferred cipher suite, or the server's most preferred
cipher suite. If true then the server's preference, as expressed
in the order of elements in cipherSuites, is used.
type: boolean
required:
- cert
- keySecret
type: object
type: object
type: object
status:
description: 'Most recent observed status of the Alertmanager cluster.
Read-only. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
properties:
availableReplicas:
description: Total number of available pods (ready for at least minReadySeconds)
targeted by this Alertmanager cluster.
format: int32
type: integer
conditions:
description: The current state of the Alertmanager object.
items:
description: Condition represents the state of the resources associated
with the Prometheus, Alertmanager or ThanosRuler resource.
properties:
lastTransitionTime:
description: lastTransitionTime is the time of the last update
to the current status property.
format: date-time
type: string
message:
description: Human-readable message indicating details for the
condition's last transition.
type: string
observedGeneration:
description: ObservedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if `.metadata.generation`
is currently 12, but the `.status.conditions[].observedGeneration`
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
type: integer
reason:
description: Reason for the condition's last transition.
type: string
status:
description: Status of the condition.
type: string
type:
description: Type of the condition being reported.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
paused:
description: Represents whether any actions on the underlying managed
objects are being performed. Only delete actions will be performed.
type: boolean
replicas:
description: Total number of non-terminated pods targeted by this
Alertmanager object (their labels match the selector).
format: int32
type: integer
unavailableReplicas:
description: Total number of unavailable pods targeted by this Alertmanager
object.
format: int32
type: integer
updatedReplicas:
description: Total number of non-terminated pods targeted by this
Alertmanager object that have the desired version spec.
format: int32
type: integer
required:
- availableReplicas
- paused
- replicas
- unavailableReplicas
- updatedReplicas
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: prometheuses.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: Prometheus
listKind: PrometheusList
plural: prometheuses
shortNames:
- prom
singular: prometheus
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: The version of Prometheus
jsonPath: .spec.version
name: Version
type: string
- description: The number of desired replicas
jsonPath: .spec.replicas
name: Desired
type: integer
- description: The number of ready replicas
jsonPath: .status.availableReplicas
name: Ready
type: integer
- jsonPath: .status.conditions[?(@.type == 'Reconciled')].status
name: Reconciled
type: string
- jsonPath: .status.conditions[?(@.type == 'Available')].status
name: Available
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Whether the resource reconciliation is paused or not
jsonPath: .status.paused
name: Paused
priority: 1
type: boolean
name: v1
schema:
openAPIV3Schema:
description: Prometheus defines a Prometheus deployment.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'Specification of the desired behavior of the Prometheus
cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
properties:
additionalAlertManagerConfigs:
description: "AdditionalAlertManagerConfigs specifies a key of a Secret
containing additional Prometheus Alertmanager configurations. The
Alertmanager configurations are appended to the configuration generated
by the Prometheus Operator. They must be formatted according to
the official Prometheus documentation: \n https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config
\n The user is responsible for making sure that the configurations
are valid \n Note that using this feature may expose the possibility
to break upgrades of Prometheus. It is advised to review Prometheus
release notes to ensure that no incompatible AlertManager configs
are going to break Prometheus after the upgrade."
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
additionalAlertRelabelConfigs:
description: "AdditionalAlertRelabelConfigs specifies a key of a Secret
containing additional Prometheus alert relabel configurations. The
alert relabel configurations are appended to the configuration generated
by the Prometheus Operator. They must be formatted according to
the official Prometheus documentation: \n https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs
\n The user is responsible for making sure that the configurations
are valid \n Note that using this feature may expose the possibility
to break upgrades of Prometheus. It is advised to review Prometheus
release notes to ensure that no incompatible alert relabel configs
are going to break Prometheus after the upgrade."
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
additionalArgs:
description: "AdditionalArgs allows setting additional arguments for
the 'prometheus' container. \n It is intended for e.g. activating
hidden flags which are not supported by the dedicated configuration
options yet. The arguments are passed as-is to the Prometheus container
which may cause issues if they are invalid or not supported by the
given Prometheus version. \n In case of an argument conflict (e.g.
an argument which is already set by the operator itself) or when
providing an invalid argument, the reconciliation will fail and
an error will be logged."
items:
description: Argument as part of the AdditionalArgs list.
properties:
name:
description: Name of the argument, e.g. "scrape.discovery-reload-interval".
minLength: 1
type: string
value:
description: Argument value, e.g. 30s. Can be empty for name-only
arguments (e.g. --storage.tsdb.no-lockfile)
type: string
required:
- name
type: object
type: array
additionalScrapeConfigs:
description: 'AdditionalScrapeConfigs allows specifying a key of a
Secret containing additional Prometheus scrape configurations. Scrape
configurations specified are appended to the configurations generated
by the Prometheus Operator. Job configurations specified must have
the form as specified in the official Prometheus documentation:
https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config.
As scrape configs are appended, the user is responsible to make
sure it is valid. Note that using this feature may expose the possibility
to break upgrades of Prometheus. It is advised to review Prometheus
release notes to ensure that no incompatible scrape configs are
going to break Prometheus after the upgrade.'
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
affinity:
description: Defines the Pods' affinity scheduling rules if specified.
properties:
nodeAffinity:
description: Describes node affinity scheduling rules for the
pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the affinity expressions specified by
this field, but it may choose a node that violates one or
more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node matches
the corresponding matchExpressions; the node(s) with the
highest sum are the most preferred.
items:
description: An empty preferred scheduling term matches
all objects with implicit weight 0 (i.e. it's a no-op).
A null preferred scheduling term matches no objects (i.e.
is also a no-op).
properties:
preference:
description: A node selector term, associated with the
corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching the corresponding
nodeSelectorTerm, in the range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to an update), the system may or may not try to
eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms.
The terms are ORed.
items:
description: A null or empty node selector term matches
no objects. The requirements of them are ANDed. The
TopologySelectorTerm type implements a subset of the
NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g. co-locate
this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the affinity expressions specified by
this field, but it may choose a node that violates one or
more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node has
pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by this
field and the ones listed in the namespaces field.
null selector and null or empty namespaces list
means "this pod's namespace". An empty selector
({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to. The
term is applied to the union of the namespaces
listed in this field and the ones selected by
namespaceSelector. null or empty namespaces list
and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey
matches that of any node on which any of the selected
pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to a pod label update), the system may or may
not try to eventually evict the pod from its node. When
there are multiple elements, the lists of nodes corresponding
to each podAffinityTerm are intersected, i.e. all terms
must be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s))
that this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
on a node whose value of the label with key
matches that of any node on which a pod of the set of
pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied to the
union of the namespaces selected by this field and
the ones listed in the namespaces field. null selector
and null or empty namespaces list means "this pod's
namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace
names that the term applies to. The term is applied
to the union of the namespaces listed in this field
and the ones selected by namespaceSelector. null or
empty namespaces list and null namespaceSelector means
"this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of
any node on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules (e.g.
avoid putting this pod in the same node, zone, etc. as some
other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the anti-affinity expressions specified
by this field, but it may choose a node that violates one
or more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node has
pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by this
field and the ones listed in the namespaces field.
null selector and null or empty namespaces list
means "this pod's namespace". An empty selector
({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to. The
term is applied to the union of the namespaces
listed in this field and the ones selected by
namespaceSelector. null or empty namespaces list
and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey
matches that of any node on which any of the selected
pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified by
this field are not met at scheduling time, the pod will
not be scheduled onto the node. If the anti-affinity requirements
specified by this field cease to be met at some point during
pod execution (e.g. due to a pod label update), the system
may or may not try to eventually evict the pod from its
node. When there are multiple elements, the lists of nodes
corresponding to each podAffinityTerm are intersected, i.e.
all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s))
that this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
on a node whose value of the label with key
matches that of any node on which a pod of the set of
pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied to the
union of the namespaces selected by this field and
the ones listed in the namespaces field. null selector
and null or empty namespaces list means "this pod's
namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace
names that the term applies to. The term is applied
to the union of the namespaces listed in this field
and the ones selected by namespaceSelector. null or
empty namespaces list and null namespaceSelector means
"this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of
any node on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
alerting:
description: Defines the settings related to Alertmanager.
properties:
alertmanagers:
description: AlertmanagerEndpoints Prometheus should fire alerts
against.
items:
description: AlertmanagerEndpoints defines a selection of a
single Endpoints object containing Alertmanager IPs to fire
alerts against.
properties:
apiVersion:
description: Version of the Alertmanager API that Prometheus
uses to send alerts. It can be "v1" or "v2".
type: string
authorization:
description: "Authorization section for Alertmanager. \n
Cannot be set at the same time as `basicAuth`, `bearerTokenFile`
or `sigv4`."
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported
value. \n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: "BasicAuth configuration for Alertmanager.
\n Cannot be set at the same time as `bearerTokenFile`,
`authorization` or `sigv4`."
properties:
password:
description: '`password` specifies a key of a Secret
containing the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret
containing the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenFile:
description: "File to read bearer token for Alertmanager.
\n Cannot be set at the same time as `basicAuth`, `authorization`,
or `sigv4`. \n Deprecated: this will be removed in a future
release. Prefer using `authorization`."
type: string
enableHttp2:
description: Whether to enable HTTP2.
type: boolean
name:
description: Name of the Endpoints object in the namespace.
type: string
namespace:
description: Namespace of the Endpoints object.
type: string
pathPrefix:
description: Prefix for the HTTP path alerts are pushed
to.
type: string
port:
anyOf:
- type: integer
- type: string
description: Port on which the Alertmanager API is exposed.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use when firing alerts.
type: string
sigv4:
description: "Sigv4 allows to configure AWS's Signature
Version 4 for the URL. \n It requires Prometheus
>= v2.48.0. \n Cannot be set at the same time as `basicAuth`,
`bearerTokenFile` or `authorization`."
properties:
accessKey:
description: AccessKey is the AWS API key. If not specified,
the environment variable `AWS_ACCESS_KEY_ID` is used.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
profile:
description: Profile is the named AWS profile used to
authenticate.
type: string
region:
description: Region is the AWS region. If blank, the
region from the default credentials chain is used.
type: string
roleArn:
description: RoleArn is the AWS Role ARN used to
authenticate.
type: string
secretKey:
description: SecretKey is the AWS API secret. If not
specified, the environment variable `AWS_SECRET_ACCESS_KEY`
is used.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
timeout:
description: Timeout is a per-target Alertmanager timeout
when pushing alerts.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
tlsConfig:
description: TLS Config to use for Alertmanager.
properties:
ca:
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to use for
the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the
targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing
client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for
the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the
targets.
properties:
key:
description: The key of the secret to select
from. Must be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion,
kind, uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for
the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
required:
- name
- namespace
- port
type: object
type: array
required:
- alertmanagers
type: object
allowOverlappingBlocks:
description: "AllowOverlappingBlocks enables vertical compaction and
vertical query merge in Prometheus. \n Deprecated: this flag has
no effect for Prometheus >= 2.39.0 where overlapping blocks are
enabled by default."
type: boolean
apiserverConfig:
description: 'APIServerConfig allows specifying a host and auth methods
to access the Kubernetes API server. If null, Prometheus is assumed
to run inside of the cluster: it will discover the API servers automatically
and use the Pod''s CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.'
properties:
authorization:
description: "Authorization section for the API server. \n Cannot
be set at the same time as `basicAuth`, `bearerToken`, or `bearerTokenFile`."
properties:
credentials:
description: Selects a key of a Secret in the namespace that
contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
credentialsFile:
description: File to read a secret from, mutually exclusive
with `credentials`.
type: string
type:
description: "Defines the authentication type. The value is
case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: "BasicAuth configuration for the API server. \n Cannot
be set at the same time as `authorization`, `bearerToken`, or
`bearerTokenFile`."
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerToken:
description: "*Warning: this field shouldn't be used because the
token value appears in clear-text. Prefer using `authorization`.*
\n Deprecated: this will be removed in a future release."
type: string
bearerTokenFile:
description: "File to read bearer token for accessing apiserver.
\n Cannot be set at the same time as `basicAuth`, `authorization`,
or `bearerToken`. \n Deprecated: this will be removed in a future
release. Prefer using `authorization`."
type: string
host:
description: Kubernetes API address consisting of a hostname or
IP address followed by an optional port number.
type: string
tlsConfig:
description: TLS Config to use for the API server.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
required:
- host
type: object
arbitraryFSAccessThroughSMs:
description: When true, ServiceMonitor, PodMonitor and Probe objects
are forbidden to reference arbitrary files on the file system of
the 'prometheus' container. When a ServiceMonitor's endpoint specifies
a `bearerTokenFile` value (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'),
a malicious target can get access to the Prometheus service account's
token in the Prometheus' scrape request. Setting `spec.arbitraryFSAccessThroughSMs`
to 'true' would prevent the attack. Users should instead provide
the credentials using the `spec.bearerTokenSecret` field.
properties:
deny:
type: boolean
type: object
baseImage:
description: 'Deprecated: use ''spec.image'' instead.'
type: string
bodySizeLimit:
description: BodySizeLimit defines a per-scrape limit on response body size.
Only valid in Prometheus versions 2.45.0 and newer.
pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$
type: string
configMaps:
description: ConfigMaps is a list of ConfigMaps in the same namespace
as the Prometheus object, which shall be mounted into the Prometheus
Pods. Each ConfigMap is added to the StatefulSet definition as a
volume named `configmap-<configmap-name>`. The ConfigMaps are mounted
into /etc/prometheus/configmaps/<configmap-name> in the 'prometheus'
container.
items:
type: string
type: array
containers:
description: "Containers allows injecting additional containers or
modifying operator generated containers. This can be used to allow
adding an authentication proxy to the Pods or to change the behavior
of an operator generated container. Containers described here modify
an operator generated container if they share the same name and
modifications are done via a strategic merge patch. \n The names
of containers managed by the operator are: * `prometheus` * `config-reloader`
* `thanos-sidecar` \n Overriding containers is entirely outside
the scope of what the maintainers will support and by doing so,
you accept that this behaviour may break at any time without notice."
items:
description: A single application container that you want to run
within a pod.
properties:
args:
description: 'Arguments to the entrypoint. The container image''s
CMD is used if this is not provided. Variable references $(VAR_NAME)
are expanded using the container''s environment. If a variable
cannot be resolved, the reference in the input string will
be unchanged. Double $$ are reduced to a single $, which allows
for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references
will never be expanded, regardless of whether the variable
exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
command:
description: 'Entrypoint array. Not executed within a shell.
The container image''s ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container''s
environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax:
i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether
the variable exists or not. Cannot be updated. More info:
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
env:
description: List of environment variables to set in the container.
Cannot be updated.
items:
description: EnvVar represents an environment variable present
in a Container.
properties:
name:
description: Name of the environment variable. Must be
a C_IDENTIFIER.
type: string
value:
description: 'Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in
the container and any service environment variables.
If a variable cannot be resolved, the reference in the
input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME)
syntax: i.e. "$$(VAR_NAME)" will produce the string
literal "$(VAR_NAME)". Escaped references will never
be expanded, regardless of whether the variable exists
or not. Defaults to "".'
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: 'Selects a field of the pod: supports
metadata.name, metadata.namespace, `metadata.labels['''']`,
`metadata.annotations['''']`, spec.nodeName,
spec.serviceAccountName, status.hostIP, status.podIP,
status.podIPs.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, limits.ephemeral-storage, requests.cpu,
requests.memory and requests.ephemeral-storage)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's
namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
envFrom:
description: List of sources to populate environment variables
in the container. The keys defined within a source must be
a C_IDENTIFIER. All invalid keys will be reported as an event
when the container is starting. When a key exists in multiple
sources, the value associated with the last source will take
precedence. Values defined by an Env with a duplicate key
will take precedence. Cannot be updated.
items:
description: EnvFromSource represents the source of a set
of ConfigMaps
properties:
configMapRef:
description: The ConfigMap to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap must be
defined
type: boolean
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each
key in the ConfigMap. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
type: object
type: array
image:
description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management
to default or override container images in workload controllers
like Deployments and StatefulSets.'
type: string
imagePullPolicy:
description: 'Image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent
otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
type: string
lifecycle:
description: Actions that the management system should take
in response to container lifecycle events. Cannot be updated.
properties:
postStart:
description: 'PostStart is called immediately after a container
is created. If the handler fails, the container is terminated
and restarted according to its restart policy. Other management
of the container blocks until the hook completes. More
info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There is no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
preStop:
description: 'PreStop is called immediately before a container
is terminated due to an API request or management event
such as liveness/startup probe failure, preemption, resource
contention, etc. The handler is not called if the container
crashes or exits. The Pod''s termination grace period
countdown begins before the PreStop hook is executed.
Regardless of the outcome of the handler, the container
will eventually terminate within the Pod''s termination
grace period (unless delayed by finalizers). Other management
of the container blocks until the hook completes or until
the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There is no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
type: object
livenessProbe:
description: 'Periodic probe of container liveness. Container
will be restarted if the probe fails. Cannot be updated. More
info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
name:
description: Name of the container specified as a DNS_LABEL.
Each container in a pod must have a unique name (DNS_LABEL).
Cannot be updated.
type: string
ports:
description: List of ports to expose from the container. Not
specifying a port here DOES NOT prevent that port from being
exposed. Any port which is listening on the default "0.0.0.0"
address inside a container will be accessible from the network.
Modifying this array with strategic merge patch may corrupt
the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255.
Cannot be updated.
items:
description: ContainerPort represents a network port in a
single container.
properties:
containerPort:
description: Number of port to expose on the pod's IP
address. This must be a valid port number, 0 < x < 65536.
format: int32
type: integer
hostIP:
description: What host IP to bind the external port to.
type: string
hostPort:
description: Number of port to expose on the host. If
specified, this must be a valid port number, 0 < x <
65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
format: int32
type: integer
name:
description: If specified, this must be an IANA_SVC_NAME
and unique within the pod. Each named port in a pod
must have a unique name. Name for the port that can
be referred to by services.
type: string
protocol:
default: TCP
description: Protocol for port. Must be UDP, TCP, or SCTP.
Defaults to "TCP".
type: string
required:
- containerPort
type: object
type: array
x-kubernetes-list-map-keys:
- containerPort
- protocol
x-kubernetes-list-type: map
readinessProbe:
description: 'Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe
fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
resizePolicy:
description: Resources resize policy for the container.
items:
description: ContainerResizePolicy represents resource resize
policy for the container.
properties:
resourceName:
description: 'Name of the resource to which this resource
resize policy applies. Supported values: cpu, memory.'
type: string
restartPolicy:
description: Restart policy to apply when specified resource
is resized. If not specified, it defaults to NotRequired.
type: string
required:
- resourceName
- restartPolicy
type: object
type: array
x-kubernetes-list-type: atomic
resources:
description: 'Compute Resources required by this container.
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only
be set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry
in pod.spec.resourceClaims of the Pod where this
field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
restartPolicy:
description: 'RestartPolicy defines the restart behavior of
individual containers in a pod. This field may only be set
for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
the restart behavior is defined by the Pod''s restart policy
and the container type. Setting the RestartPolicy as "Always"
for the init container will have the following effect: this
init container will be continually restarted on exit until
all regular containers have terminated. Once all regular containers
have completed, all init containers with restartPolicy "Always"
will be shut down. This lifecycle differs from normal init
containers and is often referred to as a "sidecar" container.
Although this init container still starts in the init container
sequence, it does not wait for the container to complete before
proceeding to the next init container. Instead, the next init
container starts immediately after this init container is
started, or after any startupProbe has successfully completed.'
type: string
securityContext:
description: 'SecurityContext defines the security options the
container should be run with. If set, the fields of SecurityContext
override the equivalent fields of PodSecurityContext. More
info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
properties:
allowPrivilegeEscalation:
description: 'AllowPrivilegeEscalation controls whether
a process can gain more privileges than its parent process.
This bool directly controls if the no_new_privs flag will
be set on the container process. AllowPrivilegeEscalation
is true always when the container is: 1) run as Privileged
2) has CAP_SYS_ADMIN Note that this field cannot be set
when spec.os.name is windows.'
type: boolean
capabilities:
description: The capabilities to add/drop when running containers.
Defaults to the default set of capabilities granted by
the container runtime. Note that this field cannot be
set when spec.os.name is windows.
properties:
add:
description: Added capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
drop:
description: Removed capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
type: object
privileged:
description: Run container in privileged mode. Processes
in privileged containers are essentially equivalent to
root on the host. Defaults to false. Note that this field
cannot be set when spec.os.name is windows.
type: boolean
procMount:
description: procMount denotes the type of proc mount to
use for the containers. The default is DefaultProcMount
which uses the container runtime defaults for readonly
paths and masked paths. This requires the ProcMountType
feature flag to be enabled. Note that this field cannot
be set when spec.os.name is windows.
type: string
readOnlyRootFilesystem:
description: Whether this container has a read-only root
filesystem. Default is false. Note that this field cannot
be set when spec.os.name is windows.
type: boolean
runAsGroup:
description: The GID to run the entrypoint of the container
process. Uses runtime default if unset. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a
non-root user. If true, the Kubelet will validate the
image at runtime to ensure that it does not run as UID
0 (root) and fail to start the container if it does. If
unset or false, no such validation will be performed.
May also be set in PodSecurityContext. If set in both
SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container
process. Defaults to user specified in image metadata
if unspecified. May also be set in PodSecurityContext. If
set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to the container.
If unspecified, the container runtime will allocate a
random SELinux context for each container. May also be
set in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies
to the container.
type: string
role:
description: Role is a SELinux role label that applies
to the container.
type: string
type:
description: Type is a SELinux type label that applies
to the container.
type: string
user:
description: User is a SELinux user label that applies
to the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by this container.
If seccomp options are provided at both the pod & container
level, the container options override the pod options.
Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile
must be preconfigured on the node to work. Must be
a descending path, relative to the kubelet's configured
seccomp profile location. Must be set if type is "Localhost".
Must NOT be set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost -
a profile defined in a file on the node should be
used. RuntimeDefault - the container runtime default
profile should be used. Unconfined - no profile should
be applied."
type: string
required:
- type
type: object
windowsOptions:
description: The Windows specific settings applied to all
containers. If unspecified, the options from the PodSecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence.
Note that this field cannot be set when spec.os.name is
linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named
by the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the
GMSA credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's
containers must have the same effective HostProcess
value (it is not allowed to have a mix of HostProcess
containers and non-HostProcess containers). In addition,
if HostProcess is true then HostNetwork must also
be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence.
type: string
type: object
type: object
startupProbe:
description: 'StartupProbe indicates that the Pod has successfully
initialized. If specified, no other probes are executed until
this completes successfully. If this probe fails, the Pod
will be restarted, just as if the livenessProbe failed. This
can be used to provide different probe parameters at the beginning
of a Pod''s lifecycle, when it might take a long time to load
data or warm a cache, than during steady-state operation.
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
stdin:
description: Whether this container should allocate a buffer
for stdin in the container runtime. If this is not set, reads
from stdin in the container will always result in EOF. Default
is false.
type: boolean
stdinOnce:
description: Whether the container runtime should close the
stdin channel after it has been opened by a single attach.
When stdin is true the stdin stream will remain open across
multiple attach sessions. If stdinOnce is set to true, stdin
is opened on container start, is empty until the first client
attaches to stdin, and then remains open and accepts data
until the client disconnects, at which time stdin is closed
and remains closed until the container is restarted. If this
flag is false, a container processes that reads from stdin
will never receive an EOF. Default is false
type: boolean
terminationMessagePath:
description: 'Optional: Path at which the file to which the
container''s termination message will be written is mounted
into the container''s filesystem. Message written is intended
to be brief final status, such as an assertion failure message.
Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited
to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
type: string
terminationMessagePolicy:
description: Indicate how the termination message should be
populated. File will use the contents of terminationMessagePath
to populate the container status message on both success and
failure. FallbackToLogsOnError will use the last chunk of
container log output if the termination message file is empty
and the container exited with an error. The log output is
limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
to File. Cannot be updated.
type: string
tty:
description: Whether this container should allocate a TTY for
itself, also requires 'stdin' to be true. Default is false.
type: boolean
volumeDevices:
description: volumeDevices is the list of block devices to be
used by the container.
items:
description: volumeDevice describes a mapping of a raw block
device within a container.
properties:
devicePath:
description: devicePath is the path inside of the container
that the device will be mapped to.
type: string
name:
description: name must match the name of a persistentVolumeClaim
in the pod
type: string
required:
- devicePath
- name
type: object
type: array
volumeMounts:
description: Pod volumes to mount into the container's filesystem.
Cannot be updated.
items:
description: VolumeMount describes a mounting of a Volume
within a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other
way around. When not set, MountPropagationNone is used.
This field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
workingDir:
description: Container's working directory. If not specified,
the container runtime's default will be used, which might
be configured in the container image. Cannot be updated.
type: string
required:
- name
type: object
type: array
disableCompaction:
description: When true, the Prometheus compaction is disabled.
type: boolean
enableAdminAPI:
description: "Enables access to the Prometheus web admin API. \n WARNING:
Enabling the admin APIs enables mutating endpoints, to delete data,
shutdown Prometheus, and more. Enabling this should be done with
care and the user is advised to add additional authentication authorization
via a proxy to ensure only clients authorized to perform these actions
can do so. \n For more information: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis"
type: boolean
enableFeatures:
description: "Enable access to Prometheus feature flags. By default,
no features are enabled. \n Enabling features which are disabled
by default is entirely outside the scope of what the maintainers
will support and by doing so, you accept that this behaviour may
break at any time without notice. \n For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/"
items:
type: string
type: array
enableRemoteWriteReceiver:
description: "Enable Prometheus to be used as a receiver for the Prometheus
remote write protocol. \n WARNING: This is not considered an efficient
way of ingesting samples. Use it with caution for specific low-volume
use cases. It is not suitable for replacing the ingestion via scraping
and turning Prometheus into a push-based metrics collection system.
For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver
\n It requires Prometheus >= v2.33.0."
type: boolean
enforcedBodySizeLimit:
description: "When defined, enforcedBodySizeLimit specifies a global
limit on the size of uncompressed response body that will be accepted
by Prometheus. Targets responding with a body larger than this many
bytes will cause the scrape to fail. \n It requires Prometheus >=
v2.28.0."
pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$
type: string
enforcedKeepDroppedTargets:
description: "When defined, enforcedKeepDroppedTargets specifies a
global limit on the number of targets dropped by relabeling that
will be kept in memory. The value overrides any `spec.keepDroppedTargets`
set by ServiceMonitor, PodMonitor, Probe objects unless `spec.keepDroppedTargets`
is greater than zero and less than `spec.enforcedKeepDroppedTargets`.
\n It requires Prometheus >= v2.47.0."
format: int64
type: integer
enforcedLabelLimit:
description: "When defined, enforcedLabelLimit specifies a global
limit on the number of labels per sample. The value overrides any
`spec.labelLimit` set by ServiceMonitor, PodMonitor, Probe objects
unless `spec.labelLimit` is greater than zero and less than `spec.enforcedLabelLimit`.
\n It requires Prometheus >= v2.27.0."
format: int64
type: integer
enforcedLabelNameLengthLimit:
description: "When defined, enforcedLabelNameLengthLimit specifies
a global limit on the length of labels name per sample. The value
overrides any `spec.labelNameLengthLimit` set by ServiceMonitor,
PodMonitor, Probe objects unless `spec.labelNameLengthLimit` is
greater than zero and less than `spec.enforcedLabelNameLengthLimit`.
\n It requires Prometheus >= v2.27.0."
format: int64
type: integer
enforcedLabelValueLengthLimit:
description: "When not null, enforcedLabelValueLengthLimit defines
a global limit on the length of labels value per sample. The value
overrides any `spec.labelValueLengthLimit` set by ServiceMonitor,
PodMonitor, Probe objects unless `spec.labelValueLengthLimit` is
greater than zero and less than `spec.enforcedLabelValueLengthLimit`.
\n It requires Prometheus >= v2.27.0."
format: int64
type: integer
enforcedNamespaceLabel:
description: "When not empty, a label will be added to \n 1. All metrics
scraped from `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig`
objects. 2. All metrics generated from recording rules defined in
`PrometheusRule` objects. 3. All alerts generated from alerting
rules defined in `PrometheusRule` objects. 4. All vector selectors
of PromQL expressions defined in `PrometheusRule` objects. \n The
label will not be added for objects referenced in `spec.excludedFromEnforcement`.
\n The label's name is this field's value. The label's value is
the namespace of the `ServiceMonitor`, `PodMonitor`, `Probe` or
`PrometheusRule` object."
type: string
enforcedSampleLimit:
description: "When defined, enforcedSampleLimit specifies a global
limit on the number of scraped samples that will be accepted. This
overrides any `spec.sampleLimit` set by ServiceMonitor, PodMonitor,
Probe objects unless `spec.sampleLimit` is greater than zero and
less than `spec.enforcedSampleLimit`. \n It is meant to be
used by admins to keep the overall number of samples/series under
a desired limit."
format: int64
type: integer
enforcedTargetLimit:
description: "When defined, enforcedTargetLimit specifies a global
limit on the number of scraped targets. The value overrides any
`spec.targetLimit` set by ServiceMonitor, PodMonitor, Probe objects
unless `spec.targetLimit` is greater than zero and less than `spec.enforcedTargetLimit`.
\n It is meant to be used by admins to keep the overall number
of targets under a desired limit."
format: int64
type: integer
evaluationInterval:
default: 30s
description: 'Interval between rule evaluations. Default: "30s"'
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
excludedFromEnforcement:
description: "List of references to PodMonitor, ServiceMonitor, Probe
and PrometheusRule objects to be excluded from enforcing a namespace
label of origin. \n It is only applicable if `spec.enforcedNamespaceLabel`
set to true."
items:
description: ObjectReference references a PodMonitor, ServiceMonitor,
Probe or PrometheusRule object.
properties:
group:
default: monitoring.coreos.com
description: Group of the referent. When not specified, it defaults
to `monitoring.coreos.com`
enum:
- monitoring.coreos.com
type: string
name:
description: Name of the referent. When not set, all resources
in the namespace are matched.
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
minLength: 1
type: string
resource:
description: Resource of the referent.
enum:
- prometheusrules
- servicemonitors
- podmonitors
- probes
- scrapeconfigs
type: string
required:
- namespace
- resource
type: object
type: array
exemplars:
description: Exemplars related settings that are runtime reloadable.
It requires to enable the `exemplar-storage` feature flag to be
effective.
properties:
maxSize:
description: "Maximum number of exemplars stored in memory for
all series. \n exemplar-storage itself must be enabled using
the `spec.enableFeature` option for exemplars to be scraped
in the first place. \n If not set, Prometheus uses its default
value. A value of zero or less than zero disables the storage."
format: int64
type: integer
type: object
externalLabels:
additionalProperties:
type: string
description: The labels to add to any time series or alerts when communicating
with external systems (federation, remote storage, Alertmanager).
Labels defined by `spec.replicaExternalLabelName` and `spec.prometheusExternalLabelName`
take precedence over this list.
type: object
externalUrl:
description: The external URL under which the Prometheus service is
externally available. This is necessary to generate correct URLs
(for instance if Prometheus is accessible behind an Ingress resource).
type: string
hostAliases:
description: Optional list of hosts and IPs that will be injected
into the Pod's hosts file if specified.
items:
description: HostAlias holds the mapping between IP and hostnames
that will be injected as an entry in the pod's hosts file.
properties:
hostnames:
description: Hostnames for the above IP address.
items:
type: string
type: array
ip:
description: IP address of the host file entry.
type: string
required:
- hostnames
- ip
type: object
type: array
x-kubernetes-list-map-keys:
- ip
x-kubernetes-list-type: map
hostNetwork:
description: "Use the host's network namespace if true. \n Make sure
to understand the security implications if you want to enable it
(https://kubernetes.io/docs/concepts/configuration/overview/). \n
When hostNetwork is enabled, this will set the DNS policy to `ClusterFirstWithHostNet`
automatically."
type: boolean
ignoreNamespaceSelectors:
description: When true, `spec.namespaceSelector` from all PodMonitor,
ServiceMonitor and Probe objects will be ignored. They will only
discover targets within the namespace of the PodMonitor, ServiceMonitor
and Probe object.
type: boolean
image:
description: "Container image name for Prometheus. If specified, it
takes precedence over the `spec.baseImage`, `spec.tag` and `spec.sha`
fields. \n Specifying `spec.version` is still necessary to ensure
the Prometheus Operator knows which version of Prometheus is being
configured. \n If neither `spec.image` nor `spec.baseImage` are
defined, the operator will use the latest upstream version of Prometheus
available at the time when the operator was released."
type: string
imagePullPolicy:
description: Image pull policy for the 'prometheus', 'init-config-reloader'
and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
for more details.
enum:
- ""
- Always
- Never
- IfNotPresent
type: string
imagePullSecrets:
description: An optional list of references to Secrets in the same
namespace to use for pulling images from registries. See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
items:
description: LocalObjectReference contains enough information to
let you locate the referenced object inside the same namespace.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
type: array
initContainers:
description: "InitContainers allows injecting initContainers to the
Pod definition. Those can be used to e.g. fetch secrets for injection
into the Prometheus configuration from external sources. Any errors
during the execution of an initContainer will lead to a restart
of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
InitContainers described here modify an operator generated init
containers if they share the same name and modifications are done
via a strategic merge patch. \n The names of init container name
managed by the operator are: * `init-config-reloader`. \n Overriding
init containers is entirely outside the scope of what the maintainers
will support and by doing so, you accept that this behaviour may
break at any time without notice."
items:
description: A single application container that you want to run
within a pod.
properties:
args:
description: 'Arguments to the entrypoint. The container image''s
CMD is used if this is not provided. Variable references $(VAR_NAME)
are expanded using the container''s environment. If a variable
cannot be resolved, the reference in the input string will
be unchanged. Double $$ are reduced to a single $, which allows
for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references
will never be expanded, regardless of whether the variable
exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
command:
description: 'Entrypoint array. Not executed within a shell.
The container image''s ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container''s
environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax:
i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether
the variable exists or not. Cannot be updated. More info:
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
env:
description: List of environment variables to set in the container.
Cannot be updated.
items:
description: EnvVar represents an environment variable present
in a Container.
properties:
name:
description: Name of the environment variable. Must be
a C_IDENTIFIER.
type: string
value:
description: 'Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in
the container and any service environment variables.
If a variable cannot be resolved, the reference in the
input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME)
syntax: i.e. "$$(VAR_NAME)" will produce the string
literal "$(VAR_NAME)". Escaped references will never
be expanded, regardless of whether the variable exists
or not. Defaults to "".'
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: 'Selects a field of the pod: supports
metadata.name, metadata.namespace, `metadata.labels['''']`,
`metadata.annotations['''']`, spec.nodeName,
spec.serviceAccountName, status.hostIP, status.podIP,
status.podIPs.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, limits.ephemeral-storage, requests.cpu,
requests.memory and requests.ephemeral-storage)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's
namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
envFrom:
description: List of sources to populate environment variables
in the container. The keys defined within a source must be
a C_IDENTIFIER. All invalid keys will be reported as an event
when the container is starting. When a key exists in multiple
sources, the value associated with the last source will take
precedence. Values defined by an Env with a duplicate key
will take precedence. Cannot be updated.
items:
description: EnvFromSource represents the source of a set
of ConfigMaps
properties:
configMapRef:
description: The ConfigMap to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap must be
defined
type: boolean
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each
key in the ConfigMap. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
type: object
type: array
image:
description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management
to default or override container images in workload controllers
like Deployments and StatefulSets.'
type: string
imagePullPolicy:
description: 'Image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent
otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
type: string
lifecycle:
description: Actions that the management system should take
in response to container lifecycle events. Cannot be updated.
properties:
postStart:
description: 'PostStart is called immediately after a container
is created. If the handler fails, the container is terminated
and restarted according to its restart policy. Other management
of the container blocks until the hook completes. More
info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
preStop:
description: 'PreStop is called immediately before a container
is terminated due to an API request or management event
such as liveness/startup probe failure, preemption, resource
contention, etc. The handler is not called if the container
crashes or exits. The Pod''s termination grace period
countdown begins before the PreStop hook is executed.
Regardless of the outcome of the handler, the container
will eventually terminate within the Pod''s termination
grace period (unless delayed by finalizers). Other management
of the container blocks until the hook completes or until
the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
type: object
livenessProbe:
description: 'Periodic probe of container liveness. Container
will be restarted if the probe fails. Cannot be updated. More
info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
name:
description: Name of the container specified as a DNS_LABEL.
Each container in a pod must have a unique name (DNS_LABEL).
Cannot be updated.
type: string
ports:
description: List of ports to expose from the container. Not
specifying a port here DOES NOT prevent that port from being
exposed. Any port which is listening on the default "0.0.0.0"
address inside a container will be accessible from the network.
Modifying this array with strategic merge patch may corrupt
the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255.
Cannot be updated.
items:
description: ContainerPort represents a network port in a
single container.
properties:
containerPort:
description: Number of port to expose on the pod's IP
address. This must be a valid port number, 0 < x < 65536.
format: int32
type: integer
hostIP:
description: What host IP to bind the external port to.
type: string
hostPort:
description: Number of port to expose on the host. If
specified, this must be a valid port number, 0 < x <
65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
format: int32
type: integer
name:
description: If specified, this must be an IANA_SVC_NAME
and unique within the pod. Each named port in a pod
must have a unique name. Name for the port that can
be referred to by services.
type: string
protocol:
default: TCP
description: Protocol for port. Must be UDP, TCP, or SCTP.
Defaults to "TCP".
type: string
required:
- containerPort
type: object
type: array
x-kubernetes-list-map-keys:
- containerPort
- protocol
x-kubernetes-list-type: map
readinessProbe:
description: 'Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe
fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
resizePolicy:
description: Resources resize policy for the container.
items:
description: ContainerResizePolicy represents resource resize
policy for the container.
properties:
resourceName:
description: 'Name of the resource to which this resource
resize policy applies. Supported values: cpu, memory.'
type: string
restartPolicy:
description: Restart policy to apply when specified resource
is resized. If not specified, it defaults to NotRequired.
type: string
required:
- resourceName
- restartPolicy
type: object
type: array
x-kubernetes-list-type: atomic
resources:
description: 'Compute Resources required by this container.
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only
be set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry
in pod.spec.resourceClaims of the Pod where this
field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
restartPolicy:
description: 'RestartPolicy defines the restart behavior of
individual containers in a pod. This field may only be set
for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
the restart behavior is defined by the Pod''s restart policy
and the container type. Setting the RestartPolicy as "Always"
for the init container will have the following effect: this
init container will be continually restarted on exit until
all regular containers have terminated. Once all regular containers
have completed, all init containers with restartPolicy "Always"
will be shut down. This lifecycle differs from normal init
containers and is often referred to as a "sidecar" container.
Although this init container still starts in the init container
sequence, it does not wait for the container to complete before
proceeding to the next init container. Instead, the next init
container starts immediately after this init container is
started, or after any startupProbe has successfully completed.'
type: string
securityContext:
description: 'SecurityContext defines the security options the
container should be run with. If set, the fields of SecurityContext
override the equivalent fields of PodSecurityContext. More
info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
properties:
allowPrivilegeEscalation:
description: 'AllowPrivilegeEscalation controls whether
a process can gain more privileges than its parent process.
This bool directly controls if the no_new_privs flag will
be set on the container process. AllowPrivilegeEscalation
is true always when the container is: 1) run as Privileged
2) has CAP_SYS_ADMIN Note that this field cannot be set
when spec.os.name is windows.'
type: boolean
capabilities:
description: The capabilities to add/drop when running containers.
Defaults to the default set of capabilities granted by
the container runtime. Note that this field cannot be
set when spec.os.name is windows.
properties:
add:
description: Added capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
drop:
description: Removed capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
type: object
privileged:
description: Run container in privileged mode. Processes
in privileged containers are essentially equivalent to
root on the host. Defaults to false. Note that this field
cannot be set when spec.os.name is windows.
type: boolean
procMount:
description: procMount denotes the type of proc mount to
use for the containers. The default is DefaultProcMount
which uses the container runtime defaults for readonly
paths and masked paths. This requires the ProcMountType
feature flag to be enabled. Note that this field cannot
be set when spec.os.name is windows.
type: string
readOnlyRootFilesystem:
description: Whether this container has a read-only root
filesystem. Default is false. Note that this field cannot
be set when spec.os.name is windows.
type: boolean
runAsGroup:
description: The GID to run the entrypoint of the container
process. Uses runtime default if unset. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a
non-root user. If true, the Kubelet will validate the
image at runtime to ensure that it does not run as UID
0 (root) and fail to start the container if it does. If
unset or false, no such validation will be performed.
May also be set in PodSecurityContext. If set in both
SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container
process. Defaults to user specified in image metadata
if unspecified. May also be set in PodSecurityContext. If
set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to the container.
If unspecified, the container runtime will allocate a
random SELinux context for each container. May also be
set in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies
to the container.
type: string
role:
description: Role is a SELinux role label that applies
to the container.
type: string
type:
description: Type is a SELinux type label that applies
to the container.
type: string
user:
description: User is a SELinux user label that applies
to the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by this container.
If seccomp options are provided at both the pod & container
level, the container options override the pod options.
Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile
must be preconfigured on the node to work. Must be
a descending path, relative to the kubelet's configured
seccomp profile location. Must be set if type is "Localhost".
Must NOT be set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost -
a profile defined in a file on the node should be
used. RuntimeDefault - the container runtime default
profile should be used. Unconfined - no profile should
be applied."
type: string
required:
- type
type: object
windowsOptions:
description: The Windows specific settings applied to all
containers. If unspecified, the options from the PodSecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence.
Note that this field cannot be set when spec.os.name is
linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named
by the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the
GMSA credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's
containers must have the same effective HostProcess
value (it is not allowed to have a mix of HostProcess
containers and non-HostProcess containers). In addition,
if HostProcess is true then HostNetwork must also
be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence.
type: string
type: object
type: object
startupProbe:
description: 'StartupProbe indicates that the Pod has successfully
initialized. If specified, no other probes are executed until
this completes successfully. If this probe fails, the Pod
will be restarted, just as if the livenessProbe failed. This
can be used to provide different probe parameters at the beginning
of a Pod''s lifecycle, when it might take a long time to load
data or warm a cache, than during steady-state operation.
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
stdin:
description: Whether this container should allocate a buffer
for stdin in the container runtime. If this is not set, reads
from stdin in the container will always result in EOF. Default
is false.
type: boolean
stdinOnce:
description: Whether the container runtime should close the
stdin channel after it has been opened by a single attach.
When stdin is true the stdin stream will remain open across
multiple attach sessions. If stdinOnce is set to true, stdin
is opened on container start, is empty until the first client
attaches to stdin, and then remains open and accepts data
until the client disconnects, at which time stdin is closed
and remains closed until the container is restarted. If this
flag is false, a container process that reads from stdin
will never receive an EOF. Default is false
type: boolean
terminationMessagePath:
description: 'Optional: Path at which the file to which the
container''s termination message will be written is mounted
into the container''s filesystem. Message written is intended
to be brief final status, such as an assertion failure message.
Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited
to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
type: string
terminationMessagePolicy:
description: Indicate how the termination message should be
populated. File will use the contents of terminationMessagePath
to populate the container status message on both success and
failure. FallbackToLogsOnError will use the last chunk of
container log output if the termination message file is empty
and the container exited with an error. The log output is
limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
to File. Cannot be updated.
type: string
tty:
description: Whether this container should allocate a TTY for
itself, also requires 'stdin' to be true. Default is false.
type: boolean
volumeDevices:
description: volumeDevices is the list of block devices to be
used by the container.
items:
description: volumeDevice describes a mapping of a raw block
device within a container.
properties:
devicePath:
description: devicePath is the path inside of the container
that the device will be mapped to.
type: string
name:
description: name must match the name of a persistentVolumeClaim
in the pod
type: string
required:
- devicePath
- name
type: object
type: array
volumeMounts:
description: Pod volumes to mount into the container's filesystem.
Cannot be updated.
items:
description: VolumeMount describes a mounting of a Volume
within a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other
way around. When not set, MountPropagationNone is used.
This field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
workingDir:
description: Container's working directory. If not specified,
the container runtime's default will be used, which might
be configured in the container image. Cannot be updated.
type: string
required:
- name
type: object
type: array
keepDroppedTargets:
description: "Per-scrape limit on the number of targets dropped by
relabeling that will be kept in memory. 0 means no limit. \n It
requires Prometheus >= v2.47.0."
format: int64
type: integer
labelLimit:
description: Per-scrape limit on number of labels that will be accepted
for a sample. Only valid in Prometheus versions 2.45.0 and newer.
format: int64
type: integer
labelNameLengthLimit:
description: Per-scrape limit on length of labels name that will be
accepted for a sample. Only valid in Prometheus versions 2.45.0
and newer.
format: int64
type: integer
labelValueLengthLimit:
description: Per-scrape limit on length of labels value that will
be accepted for a sample. Only valid in Prometheus versions 2.45.0
and newer.
format: int64
type: integer
listenLocal:
description: When true, the Prometheus server listens on the loopback
address instead of the Pod IP's address.
type: boolean
logFormat:
description: Log format for Prometheus and the config-reloader
sidecar.
enum:
- ""
- logfmt
- json
type: string
logLevel:
description: Log level for Prometheus and the config-reloader sidecar.
enum:
- ""
- debug
- info
- warn
- error
type: string
maximumStartupDurationSeconds:
description: Defines the maximum time that the `prometheus` container's
startup probe will wait before being considered failed. The startup
probe will return success after the WAL replay is complete. If set,
the value should be greater than 60 (seconds). Otherwise it will
be equal to 600 seconds (10 minutes).
format: int32
minimum: 60
type: integer
minReadySeconds:
description: "Minimum number of seconds for which a newly created
Pod should be ready without any of its container crashing for it
to be considered available. Defaults to 0 (pod will be considered
available as soon as it is ready) \n This is an alpha field from
kubernetes 1.22 until 1.24 which requires enabling the StatefulSetMinReadySeconds
feature gate."
format: int32
type: integer
nodeSelector:
additionalProperties:
type: string
description: Defines on which Nodes the Pods are scheduled.
type: object
overrideHonorLabels:
description: When true, Prometheus resolves label conflicts by renaming
the labels in the scraped data to "exported_<label value>" for all
targets created from service and pod monitors. Otherwise the HonorLabels
field of the service or pod monitor applies.
type: boolean
overrideHonorTimestamps:
description: When true, Prometheus ignores the timestamps for all
the targets created from service and pod monitors. Otherwise the
HonorTimestamps field of the service or pod monitor applies.
type: boolean
paused:
description: When a Prometheus deployment is paused, no actions except
for deletion will be performed on the underlying objects.
type: boolean
persistentVolumeClaimRetentionPolicy:
description: The field controls if and how PVCs are deleted during
the lifecycle of a StatefulSet. The default behavior is all PVCs
are retained. This is an alpha field from kubernetes 1.23 until
1.26 and a beta field from 1.26. It requires enabling the StatefulSetAutoDeletePVC
feature gate.
properties:
whenDeleted:
description: WhenDeleted specifies what happens to PVCs created
from StatefulSet VolumeClaimTemplates when the StatefulSet is
deleted. The default policy of `Retain` causes PVCs to not be
affected by StatefulSet deletion. The `Delete` policy causes
those PVCs to be deleted.
type: string
whenScaled:
description: WhenScaled specifies what happens to PVCs created
from StatefulSet VolumeClaimTemplates when the StatefulSet is
scaled down. The default policy of `Retain` causes PVCs to not
be affected by a scaledown. The `Delete` policy causes the associated
PVCs for any excess pods above the replica count to be deleted.
type: string
type: object
podMetadata:
description: "PodMetadata configures labels and annotations which
are propagated to the Prometheus pods. \n The following items are
reserved and cannot be overridden: * \"prometheus\" label, set to
the name of the Prometheus object. * \"app.kubernetes.io/instance\"
label, set to the name of the Prometheus object. * \"app.kubernetes.io/managed-by\"
label, set to \"prometheus-operator\". * \"app.kubernetes.io/name\"
label, set to \"prometheus\". * \"app.kubernetes.io/version\" label,
set to the Prometheus version. * \"operator.prometheus.io/name\"
label, set to the name of the Prometheus object. * \"operator.prometheus.io/shard\"
label, set to the shard number of the Prometheus object. * \"kubectl.kubernetes.io/default-container\"
annotation, set to \"prometheus\"."
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored
with a resource that may be set by external tools to store and
retrieve arbitrary metadata. They are not queryable and should
be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to
organize and categorize (scope and select) objects. May match
selectors of replication controllers and services. More info:
http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is required
when creating resources, although some resources may allow a
client to request the generation of an appropriate name automatically.
Name is primarily intended for creation idempotence and configuration
definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
type: object
podMonitorNamespaceSelector:
description: Namespaces to match for PodMonitors discovery. An empty
label selector matches all namespaces. A null label selector matches
the current namespace only.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
podMonitorSelector:
description: "*Experimental* PodMonitors to be selected for target
discovery. An empty label selector matches all objects. A null label
selector matches no objects. \n If `spec.serviceMonitorSelector`,
`spec.podMonitorSelector`, `spec.probeSelector` and `spec.scrapeConfigSelector`
are null, the Prometheus configuration is unmanaged. The Prometheus
operator will ensure that the Prometheus configuration's Secret
exists, but it is the responsibility of the user to provide the
raw gzipped Prometheus configuration under the `prometheus.yaml.gz`
key. This behavior is *deprecated* and will be removed in the next
major version of the custom resource definition. It is recommended
to use `spec.additionalScrapeConfigs` instead."
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
podTargetLabels:
description: PodTargetLabels are appended to the `spec.podTargetLabels`
field of all PodMonitor and ServiceMonitor objects.
items:
type: string
type: array
portName:
default: web
description: 'Port name used for the pods and governing service. Default:
"web"'
type: string
priorityClassName:
description: Priority class assigned to the Pods.
type: string
probeNamespaceSelector:
description: '*Experimental* Namespaces to match for Probe discovery.
An empty label selector matches all namespaces. A null label selector
matches the current namespace only.'
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
probeSelector:
description: "*Experimental* Probes to be selected for target discovery.
An empty label selector matches all objects. A null label selector
matches no objects. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`,
`spec.probeSelector` and `spec.scrapeConfigSelector` are null, the
Prometheus configuration is unmanaged. The Prometheus operator will
ensure that the Prometheus configuration's Secret exists, but it
is the responsibility of the user to provide the raw gzipped Prometheus
configuration under the `prometheus.yaml.gz` key. This behavior
is *deprecated* and will be removed in the next major version of
the custom resource definition. It is recommended to use `spec.additionalScrapeConfigs`
instead."
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
prometheusExternalLabelName:
description: "Name of Prometheus external label used to denote the
Prometheus instance name. The external label will _not_ be added
when the field is set to the empty string (`\"\"`). \n Default:
\"prometheus\""
type: string
prometheusRulesExcludedFromEnforce:
description: 'Defines the list of PrometheusRule objects to which
the namespace label enforcement doesn''t apply. This is only relevant
when `spec.enforcedNamespaceLabel` is set to true. Deprecated: use
`spec.excludedFromEnforcement` instead.'
items:
description: PrometheusRuleExcludeConfig enables users to configure
excluded PrometheusRule names and their namespaces to be ignored
while enforcing namespace label for alerts and metrics.
properties:
ruleName:
description: Name of the excluded PrometheusRule object.
type: string
ruleNamespace:
description: Namespace of the excluded PrometheusRule object.
type: string
required:
- ruleName
- ruleNamespace
type: object
type: array
query:
description: QuerySpec defines the configuration of the Prometheus
query service.
properties:
lookbackDelta:
description: The delta difference allowed for retrieving metrics
during expression evaluations.
type: string
maxConcurrency:
description: Number of concurrent queries that can be run at once.
format: int32
minimum: 1
type: integer
maxSamples:
description: Maximum number of samples a single query can load
into memory. Note that queries will fail if they would load
more samples than this into memory, so this also limits the
number of samples a query can return.
format: int32
type: integer
timeout:
description: Maximum time a query may take before being aborted.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
type: object
queryLogFile:
description: "queryLogFile specifies the file to which PromQL
queries are logged. \n If the filename has an empty path, e.g. 'query.log',
The Prometheus Pods will mount the file into an emptyDir volume
at `/var/log/prometheus`. If a full path is provided, e.g. '/var/log/prometheus/query.log',
you must mount a volume in the specified directory and it must be
writable. This is because the prometheus container runs with a read-only
root filesystem for security reasons. Alternatively, the location
can be set to a standard I/O stream, e.g. `/dev/stdout`, to log
query information to the default Prometheus log stream."
type: string
reloadStrategy:
description: Defines the strategy used to reload the Prometheus configuration.
If not specified, the configuration is reloaded using the /-/reload
HTTP endpoint.
enum:
- HTTP
- ProcessSignal
type: string
remoteRead:
description: Defines the list of remote read configurations.
items:
description: RemoteReadSpec defines the configuration for Prometheus
to read back samples from a remote endpoint.
properties:
authorization:
description: "Authorization section for the URL. \n It requires
Prometheus >= v2.26.0. \n Cannot be set at the same time as
`basicAuth`, or `oauth2`."
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
credentialsFile:
description: File to read a secret from, mutually exclusive
with `credentials`.
type: string
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: "BasicAuth configuration for the URL. \n Cannot
be set at the same time as `authorization`, or `oauth2`."
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerToken:
description: "*Warning: this field shouldn't be used because
the token value appears in clear-text. Prefer using `authorization`.*
\n Deprecated: this will be removed in a future release."
type: string
bearerTokenFile:
description: "File from which to read the bearer token for the
URL. \n Deprecated: this will be removed in a future release.
Prefer using `authorization`."
type: string
filterExternalLabels:
description: "Whether to use the external labels as selectors
for the remote read endpoint. \n It requires Prometheus >=
v2.34.0."
type: boolean
followRedirects:
description: "Configure whether HTTP requests follow HTTP 3xx
redirects. \n It requires Prometheus >= v2.26.0."
type: boolean
headers:
additionalProperties:
type: string
description: Custom HTTP headers to be sent along with each
remote read request. Be aware that headers that are set by
Prometheus itself can't be overwritten. Only valid in Prometheus
versions 2.26.0 and newer.
type: object
name:
description: "The name of the remote read queue, it must be
unique if specified. The name is used in metrics and logging
in order to differentiate read configurations. \n It requires
Prometheus >= v2.15.0."
type: string
oauth2:
description: "OAuth2 configuration for the URL. \n It requires
Prometheus >= v2.27.0. \n Cannot be set at the same time as
`authorization`, or `basicAuth`."
properties:
clientId:
description: '`clientId` specifies a key of a Secret or
ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for
the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the
token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyUrl:
description: Optional ProxyURL.
type: string
readRecent:
description: Whether reads should be made for queries for time
ranges that the local storage should have complete data for.
type: boolean
remoteTimeout:
description: Timeout for requests to the remote read endpoint.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
requiredMatchers:
additionalProperties:
type: string
description: An optional list of equality matchers which have
to be present in a selector to query the remote read endpoint.
type: object
tlsConfig:
description: TLS Config to use for the URL.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
url:
description: The URL of the endpoint to query from.
type: string
required:
- url
type: object
type: array
remoteWrite:
description: Defines the list of remote write configurations.
items:
description: RemoteWriteSpec defines the configuration to write
samples from Prometheus to a remote endpoint.
properties:
authorization:
description: "Authorization section for the URL. \n It requires
Prometheus >= v2.26.0. \n Cannot be set at the same time as
`sigv4`, `basicAuth`, `oauth2`, or `azureAd`."
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
credentialsFile:
description: File to read a secret from, mutually exclusive
with `credentials`.
type: string
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
azureAd:
description: "AzureAD for the URL. \n It requires Prometheus
>= v2.45.0. \n Cannot be set at the same time as `authorization`,
`basicAuth`, `oauth2`, or `sigv4`."
properties:
cloud:
description: The Azure Cloud. Options are 'AzurePublic',
'AzureChina', or 'AzureGovernment'.
enum:
- AzureChina
- AzureGovernment
- AzurePublic
type: string
managedIdentity:
description: ManagedIdentity defines the Azure User-assigned
Managed identity. Cannot be set at the same time as `oauth`.
properties:
clientId:
description: The client id
type: string
required:
- clientId
type: object
oauth:
description: "OAuth defines the oauth config that is being
used to authenticate. Cannot be set at the same time as
`managedIdentity`. \n It requires Prometheus >= v2.48.0."
properties:
clientId:
description: '`clientID` is the clientId of the Azure
Active Directory application that is being used to
authenticate.'
minLength: 1
type: string
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the client secret of the Azure Active Directory
application that is being used to authenticate.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
tenantId:
description: '`tenantID` is the tenant ID of the Azure
Active Directory application that is being used to
authenticate.'
minLength: 1
pattern: ^[0-9a-zA-Z-.]+$
type: string
required:
- clientId
- clientSecret
- tenantId
type: object
type: object
basicAuth:
description: "BasicAuth configuration for the URL. \n Cannot
be set at the same time as `sigv4`, `authorization`, `oauth2`,
or `azureAd`."
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerToken:
description: "*Warning: this field shouldn't be used because
the token value appears in clear-text. Prefer using `authorization`.*
\n Deprecated: this will be removed in a future release."
type: string
bearerTokenFile:
description: "File from which to read the bearer token for the URL.
\n Deprecated: this will be removed in a future release. Prefer
using `authorization`."
type: string
enableHTTP2:
description: Whether to enable HTTP2.
type: boolean
headers:
additionalProperties:
type: string
description: "Custom HTTP headers to be sent along with each
remote write request. Be aware that headers that are set by
Prometheus itself can't be overwritten. \n It requires Prometheus
>= v2.25.0."
type: object
metadataConfig:
description: MetadataConfig configures the sending of series
metadata to the remote storage.
properties:
send:
description: Defines whether metric metadata is sent to
the remote storage or not.
type: boolean
sendInterval:
description: Defines how frequently metric metadata is sent
to the remote storage.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
type: object
name:
description: "The name of the remote write queue, it must be
unique if specified. The name is used in metrics and logging
in order to differentiate queues. \n It requires Prometheus
>= v2.15.0."
type: string
oauth2:
description: "OAuth2 configuration for the URL. \n It requires
Prometheus >= v2.27.0. \n Cannot be set at the same time as
`sigv4`, `authorization`, `basicAuth`, or `azureAd`."
properties:
clientId:
description: '`clientId` specifies a key of a Secret or
ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for
the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the
token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyUrl:
description: Optional ProxyURL.
type: string
queueConfig:
description: QueueConfig allows tuning of the remote write queue
parameters.
properties:
batchSendDeadline:
description: BatchSendDeadline is the maximum time a sample
will wait in buffer.
type: string
capacity:
description: Capacity is the number of samples to buffer
per shard before we start dropping them.
type: integer
maxBackoff:
description: MaxBackoff is the maximum retry delay.
type: string
maxRetries:
description: MaxRetries is the maximum number of times to
retry a batch on recoverable errors.
type: integer
maxSamplesPerSend:
description: MaxSamplesPerSend is the maximum number of
samples per send.
type: integer
maxShards:
description: MaxShards is the maximum number of shards,
i.e. amount of concurrency.
type: integer
minBackoff:
description: MinBackoff is the initial retry delay. Gets
doubled for every retry.
type: string
minShards:
description: MinShards is the minimum number of shards,
i.e. amount of concurrency.
type: integer
retryOnRateLimit:
description: Retry upon receiving a 429 status code from
the remote-write storage. This is experimental feature
and might change in the future.
type: boolean
type: object
remoteTimeout:
description: Timeout for requests to the remote write endpoint.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
sendExemplars:
description: "Enables sending of exemplars over remote write.
Note that exemplar-storage itself must be enabled using the
`spec.enableFeature` option for exemplars to be scraped in
the first place. \n It requires Prometheus >= v2.27.0."
type: boolean
sendNativeHistograms:
description: "Enables sending of native histograms, also known
as sparse histograms over remote write. \n It requires Prometheus
>= v2.40.0."
type: boolean
sigv4:
description: "Sigv4 allows to configure AWS's Signature Verification
4 for the URL. \n It requires Prometheus >= v2.26.0. \n Cannot
be set at the same time as `authorization`, `basicAuth`, `oauth2`,
or `azureAd`."
properties:
accessKey:
description: AccessKey is the AWS API key. If not specified,
the environment variable `AWS_ACCESS_KEY_ID` is used.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
profile:
description: Profile is the named AWS profile used to authenticate.
type: string
region:
description: Region is the AWS region. If blank, the region
from the default credentials chain used.
type: string
roleArn:
description: RoleArn is the AWS role ARN to assume for authentication.
type: string
secretKey:
description: SecretKey is the AWS API secret. If not specified,
the environment variable `AWS_SECRET_ACCESS_KEY` is used.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
tlsConfig:
description: TLS Config to use for the URL.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
url:
description: The URL of the endpoint to send samples to.
type: string
writeRelabelConfigs:
description: The list of remote write relabel configurations.
items:
description: "RelabelConfig allows dynamic rewriting of the
label set for targets, alerts, scraped samples and remote
write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label name
which may only contain ASCII letters, numbers, as
well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
required:
- url
type: object
type: array
replicaExternalLabelName:
description: "Name of Prometheus external label used to denote the
replica name. The external label will _not_ be added when the field
is set to the empty string (`\"\"`). \n Default: \"prometheus_replica\""
type: string
replicas:
description: "Number of replicas of each shard to deploy for a Prometheus
deployment. `spec.replicas` multiplied by `spec.shards` is the total
number of Pods created. \n Default: 1"
format: int32
type: integer
resources:
description: Defines the resources requests and limits of the 'prometheus'
container.
properties:
claims:
description: "Claims lists the names of resources, defined in
spec.resourceClaims, that are used by this container. \n This
is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only be set
for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry in pod.spec.resourceClaims
of the Pod where this field is used. It makes that resource
available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute resources
allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests cannot exceed Limits.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
retention:
description: "How long to retain the Prometheus data. \n Default:
\"24h\" if `spec.retention` and `spec.retentionSize` are empty."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
retentionSize:
description: Maximum number of bytes used by the Prometheus data.
pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$
type: string
routePrefix:
description: "The route prefix Prometheus registers HTTP handlers
for. \n This is useful when using `spec.externalURL`, and a proxy
is rewriting HTTP routes of a request, and the actual ExternalURL
is still true, but the server serves requests under a different
route prefix. For example for use with `kubectl proxy`."
type: string
ruleNamespaceSelector:
description: Namespaces to match for PrometheusRule discovery. An
empty label selector matches all namespaces. A null label selector
matches the current namespace only.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
ruleSelector:
description: PrometheusRule objects to be selected for rule evaluation.
An empty label selector matches all objects. A null label selector
matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
rules:
description: Defines the configuration of the Prometheus rules' engine.
properties:
alert:
description: "Defines the parameters of the Prometheus rules'
engine. \n Any update to these parameters trigger a restart
of the pods."
properties:
forGracePeriod:
description: "Minimum duration between alert and restored
'for' state. \n This is maintained only for alerts with
a configured 'for' time greater than the grace period."
type: string
forOutageTolerance:
description: Max time to tolerate prometheus outage for restoring
'for' state of alert.
type: string
resendDelay:
description: Minimum amount of time to wait before resending
an alert to Alertmanager.
type: string
type: object
type: object
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted. Only valid in Prometheus versions
2.45.0 and newer.
format: int64
type: integer
scrapeConfigNamespaceSelector:
description: Namespaces to match for ScrapeConfig discovery. An empty
label selector matches all namespaces. A null label selector matches
the current namespace only.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
scrapeConfigSelector:
description: "*Experimental* ScrapeConfigs to be selected for target
discovery. An empty label selector matches all objects. A null label
selector matches no objects. \n If `spec.serviceMonitorSelector`,
`spec.podMonitorSelector`, `spec.probeSelector` and `spec.scrapeConfigSelector`
are null, the Prometheus configuration is unmanaged. The Prometheus
operator will ensure that the Prometheus configuration's Secret
exists, but it is the responsibility of the user to provide the
raw gzipped Prometheus configuration under the `prometheus.yaml.gz`
key. This behavior is *deprecated* and will be removed in the next
major version of the custom resource definition. It is recommended
to use `spec.additionalScrapeConfigs` instead."
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
scrapeInterval:
default: 30s
description: "Interval between consecutive scrapes. \n Default: \"30s\""
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
scrapeTimeout:
description: Number of seconds to wait until a scrape request times
out.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
secrets:
description: Secrets is a list of Secrets in the same namespace as
the Prometheus object, which shall be mounted into the Prometheus
Pods. Each Secret is added to the StatefulSet definition as a volume
named `secret-<secret-name>`. The Secrets are mounted into /etc/prometheus/secrets/<secret-name>
in the 'prometheus' container.
items:
type: string
type: array
securityContext:
description: SecurityContext holds pod-level security attributes and
common container settings. This defaults to the default PodSecurityContext.
properties:
fsGroup:
description: "A special supplemental group that applies to all
containers in a pod. Some volume types allow the Kubelet to
change the ownership of that volume to be owned by the pod:
\n 1. The owning GID will be the FSGroup 2. The setgid bit is
set (new files created in the volume will be owned by FSGroup)
3. The permission bits are OR'd with rw-rw---- \n If unset,
the Kubelet will not modify the ownership and permissions of
any volume. Note that this field cannot be set when spec.os.name
is windows."
format: int64
type: integer
fsGroupChangePolicy:
description: 'fsGroupChangePolicy defines behavior of changing
ownership and permission of the volume before being exposed
inside Pod. This field will only apply to volume types which
support fsGroup based ownership(and permissions). It will have
no effect on ephemeral volume types such as: secret, configmaps
and emptydir. Valid values are "OnRootMismatch" and "Always".
If not specified, "Always" is used. Note that this field cannot
be set when spec.os.name is windows.'
type: string
runAsGroup:
description: The GID to run the entrypoint of the container process.
Uses runtime default if unset. May also be set in SecurityContext. If
set in both SecurityContext and PodSecurityContext, the value
specified in SecurityContext takes precedence for that container.
Note that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a non-root
user. If true, the Kubelet will validate the image at runtime
to ensure that it does not run as UID 0 (root) and fail to start
the container if it does. If unset or false, no such validation
will be performed. May also be set in SecurityContext. If set
in both SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container process.
Defaults to user specified in image metadata if unspecified.
May also be set in SecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence for that container. Note that this field cannot
be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to all containers.
If unspecified, the container runtime will allocate a random
SELinux context for each container. May also be set in SecurityContext. If
set in both SecurityContext and PodSecurityContext, the value
specified in SecurityContext takes precedence for that container.
Note that this field cannot be set when spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies to
the container.
type: string
role:
description: Role is a SELinux role label that applies to
the container.
type: string
type:
description: Type is a SELinux type label that applies to
the container.
type: string
user:
description: User is a SELinux user label that applies to
the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by the containers in this
pod. Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile must be
preconfigured on the node to work. Must be a descending
path, relative to the kubelet's configured seccomp profile
location. Must be set if type is "Localhost". Must NOT be
set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost - a profile
defined in a file on the node should be used. RuntimeDefault
- the container runtime default profile should be used.
Unconfined - no profile should be applied."
type: string
required:
- type
type: object
supplementalGroups:
description: A list of groups applied to the first process run
in each container, in addition to the container's primary GID,
the fsGroup (if specified), and group memberships defined in
the container image for the uid of the container process. If
unspecified, no additional groups are added to any container.
Note that group memberships defined in the container image for
the uid of the container process are still effective, even if
they are not included in this list. Note that this field cannot
be set when spec.os.name is windows.
items:
format: int64
type: integer
type: array
sysctls:
description: Sysctls hold a list of namespaced sysctls used for
the pod. Pods with unsupported sysctls (by the container runtime)
might fail to launch. Note that this field cannot be set when
spec.os.name is windows.
items:
description: Sysctl defines a kernel parameter to be set
properties:
name:
description: Name of a property to set
type: string
value:
description: Value of a property to set
type: string
required:
- name
- value
type: object
type: array
windowsOptions:
description: The Windows specific settings applied to all containers.
If unspecified, the options within a container's SecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named by
the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the GMSA
credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's containers
must have the same effective HostProcess value (it is not
allowed to have a mix of HostProcess containers and non-HostProcess
containers). In addition, if HostProcess is true then HostNetwork
must also be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set in PodSecurityContext.
If set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence.
type: string
type: object
type: object
serviceAccountName:
description: ServiceAccountName is the name of the ServiceAccount
to use to run the Prometheus Pods.
type: string
serviceMonitorNamespaceSelector:
description: Namespaces to match for ServiceMonitors discovery. An
empty label selector matches all namespaces. A null label selector
matches the current namespace only.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
serviceMonitorSelector:
description: "ServiceMonitors to be selected for target discovery.
An empty label selector matches all objects. A null label selector
matches no objects. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`,
`spec.probeSelector` and `spec.scrapeConfigSelector` are null, the
Prometheus configuration is unmanaged. The Prometheus operator will
ensure that the Prometheus configuration's Secret exists, but it
is the responsibility of the user to provide the raw gzipped Prometheus
configuration under the `prometheus.yaml.gz` key. This behavior
is *deprecated* and will be removed in the next major version of
the custom resource definition. It is recommended to use `spec.additionalScrapeConfigs`
instead."
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
sha:
description: 'Deprecated: use ''spec.image'' instead. The image''s
digest can be specified as part of the image name.'
type: string
shards:
description: "EXPERIMENTAL: Number of shards to distribute targets
onto. `spec.replicas` multiplied by `spec.shards` is the total number
of Pods created. \n Note that scaling down shards will not reshard
data onto remaining instances, it must be manually moved. Increasing
shards will not reshard data either but it will continue to be available
from the same instances. To query globally, use Thanos sidecar and
Thanos querier or remote write data to a central location. \n Sharding
is performed on the content of the `__address__` target meta-label
for PodMonitors and ServiceMonitors and `__param_target__` for Probes.
\n Default: 1"
format: int32
type: integer
storage:
description: Storage defines the storage used by Prometheus.
properties:
disableMountSubPath:
description: 'Deprecated: subPath usage will be removed in a future
release.'
type: boolean
emptyDir:
description: 'EmptyDirVolumeSource to be used by the StatefulSet.
If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`.
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir'
properties:
medium:
description: 'medium represents what type of storage medium
should back this directory. The default is "" which means
to use the node''s default medium. Must be an empty string
(default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
type: string
sizeLimit:
anyOf:
- type: integer
- type: string
description: 'sizeLimit is the total amount of local storage
required for this EmptyDir volume. The size limit is also
applicable for memory medium. The maximum usage on memory
medium EmptyDir would be the minimum value between the SizeLimit
specified here and the sum of memory limits of all containers
in a pod. The default is nil which means that the limit
is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
ephemeral:
description: 'EphemeralVolumeSource to be used by the StatefulSet.
This is a beta field in k8s 1.21 and GA in 1.23. For lower versions,
starting with k8s 1.19, it requires enabling the GenericEphemeralVolume
feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes'
properties:
volumeClaimTemplate:
description: "Will be used to create a stand-alone PVC to
provision the volume. The pod in which this EphemeralVolumeSource
is embedded will be the owner of the PVC, i.e. the PVC will
be deleted together with the pod. The name of the PVC will
be `<pod name>-<volume name>` where `<volume name>` is the
name from the `PodSpec.Volumes` array entry. Pod validation
will reject the pod if the concatenated name is not valid
for a PVC (for example, too long). \n An existing PVC with
that name that is not owned by the pod will *not* be used
for the pod to avoid using an unrelated volume by mistake.
Starting the pod is then blocked until the unrelated PVC
is removed. If such a pre-created PVC is meant to be used
by the pod, the PVC has to be updated with an owner reference
to the pod once the pod exists. Normally this should not
be necessary, but it may be useful when manually reconstructing
a broken cluster. \n This field is read-only and no changes
will be made by Kubernetes to the PVC after it has been
created. \n Required, must not be nil."
properties:
metadata:
description: May contain labels and annotations that will
be copied into the PVC when creating it. No other fields
are allowed and will be rejected during validation.
type: object
spec:
description: The specification for the PersistentVolumeClaim.
The entire content is copied unchanged into the PVC
that gets created from this template. The same fields
as in a PersistentVolumeClaim are also valid here.
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the
provisioner or an external controller can support
the specified data source, it will create a new
volume based on the contents of the specified data
source. When the AnyVolumeDataSource feature gate
is enabled, dataSource contents will be copied to
dataSourceRef, and dataSourceRef contents will be
copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified,
then dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is
required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object from
which to populate the volume with data, if a non-empty
volume is desired. This may be any object from a
non-empty API group (non core object) or a PersistentVolumeClaim
object. When this field is specified, volume binding
will only succeed if the type of the specified object
matches some installed volume populator or dynamic
provisioner. This field will replace the functionality
of the dataSource field and as such if both fields
are non-empty, they must have the same value. For
backwards compatibility, when namespace isn''t specified
in dataSourceRef, both fields (dataSource and dataSourceRef)
will be set to the same value automatically if one
of them is empty and the other is non-empty. When
namespace is specified in dataSourceRef, dataSource
isn''t set to the same value and must be empty.
There are three important differences between dataSource
and dataSourceRef: * While dataSource only allows
two specific types of objects, dataSourceRef allows
any non-core object, as well as PersistentVolumeClaim
objects. * While dataSource ignores disallowed values
(dropping them), dataSourceRef preserves all values,
and generates an error if a disallowed value is
specified. * While dataSource only allows local
objects, dataSourceRef allows objects in any namespaces.
(Beta) Using this field requires the AnyVolumeDataSource
feature gate to be enabled. (Alpha) Using the namespace
field of dataSourceRef requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is
required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace
is specified, a gateway.networking.k8s.io/ReferenceGrant
object is required in the referent namespace
to allow that namespace's owner to accept the
reference. See the ReferenceGrant documentation
for details. (Alpha) This field requires the
CrossNamespaceVolumeDataSource feature gate
to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify
resource requirements that are lower than previous
value but must still be higher than capacity recorded
in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used
by this container. \n This is an alpha field
and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It
can only be set for containers."
items:
description: ResourceClaim references one entry
in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of
one entry in pod.spec.resourceClaims of
the Pod where this field is used. It makes
that resource available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes
to consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement is
a selector that contains values, a key, and
an operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If
the operator is Exists or DoesNotExist,
the values array must be empty. This array
is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the
StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume
is required by the claim. Value of Filesystem is
implied when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference to
the PersistentVolume backing this claim.
type: string
type: object
required:
- spec
type: object
type: object
volumeClaimTemplate:
description: Defines the PVC spec to be used by the Prometheus
StatefulSets. The easiest way to use a volume that cannot be
automatically provisioned is to use a label selector alongside
manually created PersistentVolumes.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST
resource this object represents. Servers may infer this
from the endpoint the client submits requests to. Cannot
be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
description: EmbeddedMetadata contains metadata relevant to
an EmbeddedResource.
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value
map stored with a resource that may be set by external
tools to store and retrieve arbitrary metadata. They
are not queryable and should be preserved when modifying
objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be
used to organize and categorize (scope and select) objects.
May match selectors of replication controllers and services.
More info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace.
Is required when creating resources, although some resources
may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation
idempotence and configuration definition. Cannot be
updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
type: object
spec:
description: 'Defines the desired characteristics of a volume
requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the provisioner
or an external controller can support the specified
data source, it will create a new volume based on the
contents of the specified data source. When the AnyVolumeDataSource
feature gate is enabled, dataSource contents will be
copied to dataSourceRef, and dataSourceRef contents
will be copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified, then
dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object from
which to populate the volume with data, if a non-empty
volume is desired. This may be any object from a non-empty
API group (non core object) or a PersistentVolumeClaim
object. When this field is specified, volume binding
will only succeed if the type of the specified object
matches some installed volume populator or dynamic provisioner.
This field will replace the functionality of the dataSource
field and as such if both fields are non-empty, they
must have the same value. For backwards compatibility,
when namespace isn''t specified in dataSourceRef, both
fields (dataSource and dataSourceRef) will be set to
the same value automatically if one of them is empty
and the other is non-empty. When namespace is specified
in dataSourceRef, dataSource isn''t set to the same
value and must be empty. There are three important differences
between dataSource and dataSourceRef: * While dataSource
only allows two specific types of objects, dataSourceRef
allows any non-core object, as well as PersistentVolumeClaim
objects. * While dataSource ignores disallowed values
(dropping them), dataSourceRef preserves all values,
and generates an error if a disallowed value is specified.
* While dataSource only allows local objects, dataSourceRef
allows objects in any namespaces. (Beta) Using this
field requires the AnyVolumeDataSource feature gate
to be enabled. (Alpha) Using the namespace field of
dataSourceRef requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace is specified,
a gateway.networking.k8s.io/ReferenceGrant object
is required in the referent namespace to allow that
namespace's owner to accept the reference. See the
ReferenceGrant documentation for details. (Alpha)
This field requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify resource
requirements that are lower than previous value but
must still be higher than capacity recorded in the status
field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used by
this container. \n This is an alpha field and requires
enabling the DynamicResourceAllocation feature gate.
\n This field is immutable. It can only be set for
containers."
items:
description: ResourceClaim references one entry
in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one
entry in pod.spec.resourceClaims of the Pod
where this field is used. It makes that resource
available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount
of compute resources required. If Requests is omitted
for a container, it defaults to Limits if that is
explicitly specified, otherwise to an implementation-defined
value. Requests cannot exceed Limits. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes to
consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values
array must be non-empty. If the operator is
Exists or DoesNotExist, the values array must
be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field
is "key", the operator is "In", and the values array
contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the StorageClass
required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume is
required by the claim. Value of Filesystem is implied
when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference to the
PersistentVolume backing this claim.
type: string
type: object
status:
description: 'Deprecated: this field is never set.'
properties:
accessModes:
description: 'accessModes contains the actual access modes
the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
allocatedResourceStatuses:
additionalProperties:
description: When a controller receives persistentvolume
claim update with ClaimResourceStatus for a resource
that it does not recognizes, then it should ignore
that update and let other controllers handle it.
type: string
description: "allocatedResourceStatuses stores status
of resource being resized for the given PVC. Key names
follow standard Kubernetes label syntax. Valid values
are either: * Un-prefixed keys: - storage - the capacity
of the volume. * Custom resources must use implementation-defined
prefixed names such as \"example.com/my-custom-resource\"
Apart from above values - keys that are unprefixed or
have kubernetes.io prefix are considered reserved and
hence may not be used. \n ClaimResourceStatus can be
in any of following states: - ControllerResizeInProgress:
State set when resize controller starts resizing the
volume in control-plane. - ControllerResizeFailed: State
set when resize has failed in resize controller with
a terminal error. - NodeResizePending: State set when
resize controller has finished resizing the volume but
further resizing of volume is needed on the node. -
NodeResizeInProgress: State set when kubelet starts
resizing the volume. - NodeResizeFailed: State set when
resizing has failed in kubelet with a terminal error.
Transient errors don't set NodeResizeFailed. For example:
if expanding a PVC for more capacity - this field can
be one of the following states: - pvc.status.allocatedResourceStatus['storage']
= \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage']
= \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizeFailed\" When this field is not set, it
means that no resize operation is in progress for the
given PVC. \n A controller that receives PVC update
with previously unknown resourceName or ClaimResourceStatus
should ignore the update for the purpose it was designed.
For example - a controller that only is responsible
for resizing capacity of the volume, should ignore PVC
updates that change other valid resources associated
with PVC. \n This is an alpha field and requires enabling
RecoverVolumeExpansionFailure feature."
type: object
x-kubernetes-map-type: granular
allocatedResources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: "allocatedResources tracks the resources
allocated to a PVC including its capacity. Key names
follow standard Kubernetes label syntax. Valid values
are either: * Un-prefixed keys: - storage - the capacity
of the volume. * Custom resources must use implementation-defined
prefixed names such as \"example.com/my-custom-resource\"
Apart from above values - keys that are unprefixed or
have kubernetes.io prefix are considered reserved and
hence may not be used. \n Capacity reported here may
be larger than the actual capacity when a volume expansion
operation is requested. For storage quota, the larger
value from allocatedResources and PVC.spec.resources
is used. If allocatedResources is not set, PVC.spec.resources
alone is used for quota calculation. If a volume expansion
capacity request is lowered, allocatedResources is only
lowered if there are no expansion operations in progress
and if the actual volume capacity is equal or lower
than the requested capacity. \n A controller that receives
PVC update with previously unknown resourceName should
ignore the update for the purpose it was designed. For
example - a controller that only is responsible for
resizing capacity of the volume, should ignore PVC updates
that change other valid resources associated with PVC.
\n This is an alpha field and requires enabling RecoverVolumeExpansionFailure
feature."
type: object
capacity:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: capacity represents the actual resources
of the underlying volume.
type: object
conditions:
description: conditions is the current Condition of persistent
volume claim. If underlying persistent volume is being
resized then the Condition will be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
description: lastProbeTime is the time we probed
the condition.
format: date-time
type: string
lastTransitionTime:
description: lastTransitionTime is the time the
condition transitioned from one status to another.
format: date-time
type: string
message:
description: message is the human-readable message
indicating details about last transition.
type: string
reason:
description: reason is a unique, this should be
a short, machine understandable string that gives
the reason for condition's last transition. If
it reports "ResizeStarted" that means the underlying
persistent volume is being resized.
type: string
status:
type: string
type:
description: PersistentVolumeClaimConditionType
is a valid value of PersistentVolumeClaimCondition.Type
type: string
required:
- status
- type
type: object
type: array
phase:
description: phase represents the current phase of PersistentVolumeClaim.
type: string
type: object
type: object
type: object
tag:
description: 'Deprecated: use ''spec.image'' instead. The image''s
tag can be specified as part of the image name.'
type: string
targetLimit:
description: TargetLimit defines a limit on the number of scraped
targets that will be accepted. Only valid in Prometheus versions
2.45.0 and newer.
format: int64
type: integer
thanos:
description: "Defines the configuration of the optional Thanos sidecar.
\n This section is experimental, it may change significantly without
deprecation notice in any release."
properties:
additionalArgs:
description: AdditionalArgs allows setting additional arguments
for the Thanos container. The arguments are passed as-is to
the Thanos container which may cause issues if they are invalid
or not supported the given Thanos version. In case of an argument
conflict (e.g. an argument which is already set by the operator
itself) or when providing an invalid argument, the reconciliation
will fail and an error will be logged.
items:
description: Argument as part of the AdditionalArgs list.
properties:
name:
description: Name of the argument, e.g. "scrape.discovery-reload-interval".
minLength: 1
type: string
value:
description: Argument value, e.g. 30s. Can be empty for
name-only arguments (e.g. --storage.tsdb.no-lockfile)
type: string
required:
- name
type: object
type: array
baseImage:
description: 'Deprecated: use ''image'' instead.'
type: string
blockSize:
default: 2h
description: "BlockDuration controls the size of TSDB blocks produced
by Prometheus. The default value is 2h to match the upstream
Prometheus defaults. \n WARNING: Changing the block duration
can impact the performance and efficiency of the entire Prometheus/Thanos
stack due to how it interacts with memory and Thanos compactors.
It is recommended to keep this value set to a multiple of 120
times your longest scrape or rule interval. For example, 30s
* 120 = 1h."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
getConfigInterval:
description: How often to retrieve the Prometheus configuration.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
getConfigTimeout:
description: Maximum time to wait when retrieving the Prometheus
configuration.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
grpcListenLocal:
description: "When true, the Thanos sidecar listens on the loopback
interface instead of the Pod IP's address for the gRPC endpoints.
\n It has no effect if `listenLocal` is true."
type: boolean
grpcServerTlsConfig:
description: "Configures the TLS parameters for the gRPC server
providing the StoreAPI. \n Note: Currently only the `caFile`,
`certFile`, and `keyFile` fields are supported."
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
httpListenLocal:
description: "When true, the Thanos sidecar listens on the loopback
interface instead of the Pod IP's address for the HTTP endpoints.
\n It has no effect if `listenLocal` is true."
type: boolean
image:
description: "Container image name for Thanos. If specified, it
takes precedence over the `spec.thanos.baseImage`, `spec.thanos.tag`
and `spec.thanos.sha` fields. \n Specifying `spec.thanos.version`
is still necessary to ensure the Prometheus Operator knows which
version of Thanos is being configured. \n If neither `spec.thanos.image`
nor `spec.thanos.baseImage` are defined, the operator will use
the latest upstream version of Thanos available at the time
when the operator was released."
type: string
listenLocal:
description: 'Deprecated: use `grpcListenLocal` and `httpListenLocal`
instead.'
type: boolean
logFormat:
description: Log format for the Thanos sidecar.
enum:
- ""
- logfmt
- json
type: string
logLevel:
description: Log level for the Thanos sidecar.
enum:
- ""
- debug
- info
- warn
- error
type: string
minTime:
description: Defines the start of time range limit served by the
Thanos sidecar's StoreAPI. The field's value should be a constant
time in RFC3339 format or a time duration relative to current
time, such as -1d or 2h45m. Valid duration units are ms, s,
m, h, d, w, y.
type: string
objectStorageConfig:
description: "Defines the Thanos sidecar's configuration to upload
TSDB blocks to object storage. \n More info: https://thanos.io/tip/thanos/storage.md/
\n objectStorageConfigFile takes precedence over this field."
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
objectStorageConfigFile:
description: "Defines the Thanos sidecar's configuration file
to upload TSDB blocks to object storage. \n More info: https://thanos.io/tip/thanos/storage.md/
\n This field takes precedence over objectStorageConfig."
type: string
readyTimeout:
description: ReadyTimeout is the maximum time that the Thanos
sidecar will wait for Prometheus to start.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
resources:
description: Defines the resources requests and limits of the
Thanos sidecar.
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only be
set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry in
pod.spec.resourceClaims of the Pod where this field
is used. It makes that resource available inside a
container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests cannot exceed
Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
sha:
description: 'Deprecated: use ''image'' instead. The image digest
can be specified as part of the image name.'
type: string
tag:
description: 'Deprecated: use ''image'' instead. The image''s
tag can be specified as part of the image name.'
type: string
tracingConfig:
description: "Defines the tracing configuration for the Thanos
sidecar. \n More info: https://thanos.io/tip/thanos/tracing.md/
\n This is an experimental feature, it may change in any upcoming
release in a breaking way. \n tracingConfigFile takes precedence
over this field."
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
tracingConfigFile:
description: "Defines the tracing configuration file for the Thanos
sidecar. \n More info: https://thanos.io/tip/thanos/tracing.md/
\n This is an experimental feature, it may change in any upcoming
release in a breaking way. \n This field takes precedence over
tracingConfig."
type: string
version:
description: "Version of Thanos being deployed. The operator uses
this information to generate the Prometheus StatefulSet + configuration
files. \n If not specified, the operator assumes the latest
upstream release of Thanos available at the time when the version
of the operator was released."
type: string
volumeMounts:
description: VolumeMounts allows configuration of additional VolumeMounts
for Thanos. VolumeMounts specified will be appended to other
VolumeMounts in the 'thanos-sidecar' container.
items:
description: VolumeMount describes a mounting of a Volume within
a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other way
around. When not set, MountPropagationNone is used. This
field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
type: object
tolerations:
description: Defines the Pods' tolerations if specified.
items:
description: The pod this Toleration is attached to tolerates any
taint that matches the triple <key,value,effect> using the matching
operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty
means match all taint effects. When specified, allowed values
are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies
to. Empty means match all taint keys. If the key is empty,
operator must be Exists; this combination means to match all
values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the
value. Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod
can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time
the toleration (which must be of effect NoExecute, otherwise
this field is ignored) tolerates the taint. By default, it
is not set, which means tolerate the taint forever (do not
evict). Zero and negative values will be treated as 0 (evict
immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches
to. If the operator is Exists, the value should be empty,
otherwise just a regular string.
type: string
type: object
type: array
topologySpreadConstraints:
description: Defines the pod's topology spread constraints if specified.
items:
properties:
additionalLabelSelectors:
description: Defines what Prometheus Operator managed labels
should be added to labelSelector on the topologySpreadConstraint.
enum:
- OnResource
- OnShard
type: string
labelSelector:
description: LabelSelector is used to find matching pods. Pods
that match this label selector are counted to determine the
number of pods in their corresponding topology domain.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
matchLabelKeys:
description: "MatchLabelKeys is a set of pod label keys to select
the pods over which spreading will be calculated. The keys
are used to lookup values from the incoming pod labels, those
key-value labels are ANDed with labelSelector to select the
group of existing pods over which spreading will be calculated
for the incoming pod. The same key is forbidden to exist in
both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot
be set when LabelSelector isn't set. Keys that don't exist
in the incoming pod labels will be ignored. A null or empty
list means only match against labelSelector. \n This is a
beta field and requires the MatchLabelKeysInPodTopologySpread
feature gate to be enabled (enabled by default)."
items:
type: string
type: array
x-kubernetes-list-type: atomic
maxSkew:
description: 'MaxSkew describes the degree to which pods may
be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`,
it is the maximum permitted difference between the number
of matching pods in the target topology and the global minimum.
The global minimum is the minimum number of matching pods
in an eligible domain or zero if the number of eligible domains
is less than MinDomains. For example, in a 3-zone cluster,
MaxSkew is set to 1, and pods with the same labelSelector
spread as 2/2/1: In this case, the global minimum is 1. |
zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew
is 1, incoming pod can only be scheduled to zone3 to become
2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1)
on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming
pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`,
it is used to give higher precedence to topologies that satisfy
it. It''s a required field. Default value is 1 and 0 is not
allowed.'
format: int32
type: integer
minDomains:
description: "MinDomains indicates a minimum number of eligible
domains. When the number of eligible domains with matching
topology keys is less than minDomains, Pod Topology Spread
treats \"global minimum\" as 0, and then the calculation of
Skew is performed. And when the number of eligible domains
with matching topology keys equals or greater than minDomains,
this value has no effect on scheduling. As a result, when
the number of eligible domains is less than minDomains, scheduler
won't schedule more than maxSkew Pods to those domains. If
value is nil, the constraint behaves as if MinDomains is equal
to 1. Valid values are integers greater than 0. When value
is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For
example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains
is set to 5 and pods with the same labelSelector spread as
2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P |
The number of domains is less than 5(MinDomains), so \"global
minimum\" is treated as 0. In this situation, new pod with
the same labelSelector cannot be scheduled, because computed
skew will be 3(3 - 0) if new Pod is scheduled to any of the
three zones, it will violate MaxSkew. \n This is a beta field
and requires the MinDomainsInPodTopologySpread feature gate
to be enabled (enabled by default)."
format: int32
type: integer
nodeAffinityPolicy:
description: "NodeAffinityPolicy indicates how we will treat
Pod's nodeAffinity/nodeSelector when calculating pod topology
spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector
are included in the calculations. - Ignore: nodeAffinity/nodeSelector
are ignored. All nodes are included in the calculations. \n
If this value is nil, the behavior is equivalent to the Honor
policy. This is a beta-level feature default enabled by the
NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
nodeTaintsPolicy:
description: "NodeTaintsPolicy indicates how we will treat node
taints when calculating pod topology spread skew. Options
are: - Honor: nodes without taints, along with tainted nodes
for which the incoming pod has a toleration, are included.
- Ignore: node taints are ignored. All nodes are included.
\n If this value is nil, the behavior is equivalent to the
Ignore policy. This is a beta-level feature default enabled
by the NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
topologyKey:
description: TopologyKey is the key of node labels. Nodes that
have a label with this key and identical values are considered
to be in the same topology. We consider each <key, value>
as a "bucket", and try to put balanced number of pods into
each bucket. We define a domain as a particular instance of
a topology. Also, we define an eligible domain as a domain
whose nodes meet the requirements of nodeAffinityPolicy and
nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname",
each Node is a domain of that topology. And, if TopologyKey
is "topology.kubernetes.io/zone", each zone is a domain of
that topology. It's a required field.
type: string
whenUnsatisfiable:
description: 'WhenUnsatisfiable indicates how to deal with a
pod if it doesn''t satisfy the spread constraint. - DoNotSchedule
(default) tells the scheduler not to schedule it. - ScheduleAnyway
tells the scheduler to schedule the pod in any location, but
giving higher precedence to topologies that would help reduce
the skew. A constraint is considered "Unsatisfiable" for an
incoming pod if and only if every possible node assignment
for that pod would violate "MaxSkew" on some topology. For
example, in a 3-zone cluster, MaxSkew is set to 1, and pods
with the same labelSelector spread as 3/1/1: | zone1 | zone2
| zone3 | | P P P | P | P | If WhenUnsatisfiable is
set to DoNotSchedule, incoming pod can only be scheduled to
zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on
zone2(zone3) satisfies MaxSkew(1). In other words, the cluster
can still be imbalanced, but scheduler won''t make it *more*
imbalanced. It''s a required field.'
type: string
required:
- maxSkew
- topologyKey
- whenUnsatisfiable
type: object
type: array
tracingConfig:
description: 'EXPERIMENTAL: TracingConfig configures tracing in Prometheus.
This is an experimental feature, it may change in any upcoming release
in a breaking way.'
properties:
clientType:
description: Client used to export the traces. Supported values
are `http` or `grpc`.
enum:
- http
- grpc
type: string
compression:
description: Compression key for supported compression types.
The only supported value is `gzip`.
enum:
- gzip
type: string
endpoint:
description: Endpoint to send the traces to. Should be provided
in format <host>:<port>.
minLength: 1
type: string
headers:
additionalProperties:
type: string
description: Key-value pairs to be used as headers associated
with gRPC or HTTP requests.
type: object
insecure:
description: If disabled, the client will use a secure connection.
type: boolean
samplingFraction:
anyOf:
- type: integer
- type: string
description: Sets the probability a given trace will be sampled.
Must be a float from 0 through 1.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
timeout:
description: Maximum time the exporter will wait for each batch
export.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
tlsConfig:
description: TLS Config to use when sending traces.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
required:
- endpoint
type: object
tsdb:
description: Defines the runtime reloadable configuration of the timeseries
database (TSDB).
properties:
outOfOrderTimeWindow:
description: "Configures how old an out-of-order/out-of-bounds
sample can be with respect to the TSDB max time. \n An out-of-order/out-of-bounds
sample is ingested into the TSDB as long as the timestamp of
the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). \n Out
of order ingestion is an experimental feature. \n It requires
Prometheus >= v2.39.0."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
type: object
version:
description: "Version of Prometheus being deployed. The operator uses
this information to generate the Prometheus StatefulSet + configuration
files. \n If not specified, the operator assumes the latest upstream
version of Prometheus available at the time when the version of
the operator was released."
type: string
volumeMounts:
description: "VolumeMounts allows the configuration of additional
VolumeMounts. \n VolumeMounts will be appended to other VolumeMounts
in the 'prometheus' container, that are generated as a result of
StorageSpec objects."
items:
description: VolumeMount describes a mounting of a Volume within
a container.
properties:
mountPath:
description: Path within the container at which the volume should
be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are propagated
from the host to container and the other way around. When
not set, MountPropagationNone is used. This field is beta
in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which the
container's volume should be mounted. Behaves similarly to
SubPath but environment variable references $(VAR_NAME) are
expanded using the container's environment. Defaults to ""
(volume's root). SubPathExpr and SubPath are mutually exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
volumes:
description: Volumes allows the configuration of additional volumes
on the output StatefulSet definition. Volumes specified will be
appended to other volumes that are generated as a result of StorageSpec
objects.
items:
description: Volume represents a named volume in a pod that may
be accessed by any container in the pod.
properties:
awsElasticBlockStore:
description: 'awsElasticBlockStore represents an AWS Disk resource
that is attached to a kubelet''s host machine and then exposed
to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
properties:
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
partition:
description: 'partition is the partition in the volume that
you want to mount. If omitted, the default is to mount
by volume name. Examples: For volume /dev/sda1, you specify
the partition as "1". Similarly, the volume partition
for /dev/sda is "0" (or you can leave the property empty).'
format: int32
type: integer
readOnly:
description: 'readOnly value true will force the readOnly
setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
type: boolean
volumeID:
description: 'volumeID is unique ID of the persistent disk
resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
type: string
required:
- volumeID
type: object
azureDisk:
description: azureDisk represents an Azure Data Disk mount on
the host and bind mount to the pod.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
Read Only, Read Write.'
type: string
diskName:
description: diskName is the Name of the data disk in the
blob storage
type: string
diskURI:
description: diskURI is the URI of data disk in the blob
storage
type: string
fsType:
description: fsType is Filesystem type to mount. Must be
a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
kind:
description: 'kind expected values are Shared: multiple
blob disks per storage account Dedicated: single blob
disk per storage account Managed: azure managed data
disk (only in managed availability set). defaults to shared'
type: string
readOnly:
description: readOnly Defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
required:
- diskName
- diskURI
type: object
azureFile:
description: azureFile represents an Azure File Service mount
on the host and bind mount to the pod.
properties:
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretName:
description: secretName is the name of secret that contains
Azure Storage Account Name and Key
type: string
shareName:
description: shareName is the azure share Name
type: string
required:
- secretName
- shareName
type: object
cephfs:
description: cephFS represents a Ceph FS mount on the host that
shares a pod's lifetime
properties:
monitors:
description: 'monitors is Required: Monitors is a collection
of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
items:
type: string
type: array
path:
description: 'path is Optional: Used as the mounted root,
rather than the full Ceph tree, default is /'
type: string
readOnly:
description: 'readOnly is Optional: Defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: boolean
secretFile:
description: 'secretFile is Optional: SecretFile is the
path to key ring for User, default is /etc/ceph/user.secret
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: string
secretRef:
description: 'secretRef is Optional: SecretRef is reference
to the authentication secret for User, default is empty.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
user:
description: 'user is optional: User is the rados user name,
default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: string
required:
- monitors
type: object
cinder:
description: 'cinder represents a cinder volume attached and
mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
properties:
fsType:
description: 'fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to
be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: string
readOnly:
description: 'readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: boolean
secretRef:
description: 'secretRef is optional: points to a secret
object containing parameters used to connect to OpenStack.'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
volumeID:
description: 'volumeID used to identify the volume in cinder.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: string
required:
- volumeID
type: object
configMap:
description: configMap represents a configMap that should populate
this volume
properties:
defaultMode:
description: 'defaultMode is optional: mode bits used to
set permissions on created files by default. Must be an
octal value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: items if unspecified, each key-value pair in
the Data field of the referenced ConfigMap will be projected
into the volume as a file whose name is the key and content
is the value. If specified, the listed keys will be projected
into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in
the ConfigMap, the volume setup will error unless it is
marked optional. Paths must be relative and may not contain
the '..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits used to
set permissions on this file. Must be an octal value
between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. If not
specified, the volume defaultMode will be used.
This might be in conflict with other options that
affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of the file
to map the key to. May not be an absolute path.
May not contain the path element '..'. May not start
with the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: optional specify whether the ConfigMap or its
keys must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
storage that is handled by certain external CSI drivers (Beta
feature).
properties:
driver:
description: driver is the name of the CSI driver that handles
this volume. Consult with your admin for the correct name
as registered in the cluster.
type: string
fsType:
description: fsType to mount. Ex. "ext4", "xfs", "ntfs".
If not provided, the empty value is passed to the associated
CSI driver which will determine the default filesystem
to apply.
type: string
nodePublishSecretRef:
description: nodePublishSecretRef is a reference to the
secret object containing sensitive information to pass
to the CSI driver to complete the CSI NodePublishVolume
and NodeUnpublishVolume calls. This field is optional,
and may be empty if no secret is required. If the secret
object contains more than one secret, all secret references
are passed.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
readOnly:
description: readOnly specifies a read-only configuration
for the volume. Defaults to false (read/write).
type: boolean
volumeAttributes:
additionalProperties:
type: string
description: volumeAttributes stores driver-specific properties
that are passed to the CSI driver. Consult your driver's
documentation for supported values.
type: object
required:
- driver
type: object
downwardAPI:
description: downwardAPI represents downward API about the pod
that should populate this volume
properties:
defaultMode:
description: 'Optional: mode bits to use on created files
by default. Must be a Optional: mode bits used to set
permissions on created files by default. Must be an octal
value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: Items is a list of downward API volume file
items:
description: DownwardAPIVolumeFile represents information
to create the file containing the pod field
properties:
fieldRef:
description: 'Required: Selects a field of the pod:
only annotations, labels, name and namespace are
supported.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
mode:
description: 'Optional: mode bits used to set permissions
on this file, must be an octal value between 0000
and 0777 or a decimal value between 0 and 511. YAML
accepts both octal and decimal values, JSON requires
decimal values for mode bits. If not specified,
the volume defaultMode will be used. This might
be in conflict with other options that affect the
file mode, like fsGroup, and the result can be other
mode bits set.'
format: int32
type: integer
path:
description: 'Required: Path is the relative path
name of the file to be created. Must not be absolute
or contain the ''..'' path. Must be utf-8 encoded.
The first item of the relative path must not start
with ''..'''
type: string
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, requests.cpu and requests.memory)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
required:
- path
type: object
type: array
type: object
emptyDir:
description: 'emptyDir represents a temporary directory that
shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
properties:
medium:
description: 'medium represents what type of storage medium
should back this directory. The default is "" which means
to use the node''s default medium. Must be an empty string
(default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
type: string
sizeLimit:
anyOf:
- type: integer
- type: string
description: 'sizeLimit is the total amount of local storage
required for this EmptyDir volume. The size limit is also
applicable for memory medium. The maximum usage on memory
medium EmptyDir would be the minimum value between the
SizeLimit specified here and the sum of memory limits
of all containers in a pod. The default is nil which means
that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
ephemeral:
description: "ephemeral represents a volume that is handled
by a cluster storage driver. The volume's lifecycle is tied
to the pod that defines it - it will be created before the
pod starts, and deleted when the pod is removed. \n Use this
if: a) the volume is only needed while the pod runs, b) features
of normal volumes like restoring from snapshot or capacity
tracking are needed, c) the storage driver is specified through
a storage class, and d) the storage driver supports dynamic
volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource
for more information on the connection between this volume
type and PersistentVolumeClaim). \n Use PersistentVolumeClaim
or one of the vendor-specific APIs for volumes that persist
for longer than the lifecycle of an individual pod. \n Use
CSI for light-weight local ephemeral volumes if the CSI driver
is meant to be used that way - see the documentation of the
driver for more information. \n A pod can use both types of
ephemeral volumes and persistent volumes at the same time."
properties:
volumeClaimTemplate:
description: "Will be used to create a stand-alone PVC to
provision the volume. The pod in which this EphemeralVolumeSource
is embedded will be the owner of the PVC, i.e. the PVC
will be deleted together with the pod. The name of the
PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry.
Pod validation will reject the pod if the concatenated
name is not valid for a PVC (for example, too long). \n
An existing PVC with that name that is not owned by the
pod will *not* be used for the pod to avoid using an unrelated
volume by mistake. Starting the pod is then blocked until
the unrelated PVC is removed. If such a pre-created PVC
is meant to be used by the pod, the PVC has to updated
with an owner reference to the pod once the pod exists.
Normally this should not be necessary, but it may be useful
when manually reconstructing a broken cluster. \n This
field is read-only and no changes will be made by Kubernetes
to the PVC after it has been created. \n Required, must
not be nil."
properties:
metadata:
description: May contain labels and annotations that
will be copied into the PVC when creating it. No other
fields are allowed and will be rejected during validation.
type: object
spec:
description: The specification for the PersistentVolumeClaim.
The entire content is copied unchanged into the PVC
that gets created from this template. The same fields
as in a PersistentVolumeClaim are also valid here.
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the
provisioner or an external controller can support
the specified data source, it will create a new
volume based on the contents of the specified
data source. When the AnyVolumeDataSource feature
gate is enabled, dataSource contents will be copied
to dataSourceRef, and dataSourceRef contents will
be copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified,
then dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API
group. For any other third-party types, APIGroup
is required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object
from which to populate the volume with data, if
a non-empty volume is desired. This may be any
object from a non-empty API group (non core object)
or a PersistentVolumeClaim object. When this field
is specified, volume binding will only succeed
if the type of the specified object matches some
installed volume populator or dynamic provisioner.
This field will replace the functionality of the
dataSource field and as such if both fields are
non-empty, they must have the same value. For
backwards compatibility, when namespace isn''t
specified in dataSourceRef, both fields (dataSource
and dataSourceRef) will be set to the same value
automatically if one of them is empty and the
other is non-empty. When namespace is specified
in dataSourceRef, dataSource isn''t set to the
same value and must be empty. There are three
important differences between dataSource and dataSourceRef:
* While dataSource only allows two specific types
of objects, dataSourceRef allows any non-core
object, as well as PersistentVolumeClaim objects.
* While dataSource ignores disallowed values (dropping
them), dataSourceRef preserves all values, and
generates an error if a disallowed value is specified.
* While dataSource only allows local objects,
dataSourceRef allows objects in any namespaces.
(Beta) Using this field requires the AnyVolumeDataSource
feature gate to be enabled. (Alpha) Using the
namespace field of dataSourceRef requires the
CrossNamespaceVolumeDataSource feature gate to
be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API
group. For any other third-party types, APIGroup
is required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace
is specified, a gateway.networking.k8s.io/ReferenceGrant
object is required in the referent namespace
to allow that namespace's owner to accept
the reference. See the ReferenceGrant documentation
for details. (Alpha) This field requires the
CrossNamespaceVolumeDataSource feature gate
to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify
resource requirements that are lower than previous
value but must still be higher than capacity recorded
in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used
by this container. \n This is an alpha field
and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable.
It can only be set for containers."
items:
description: ResourceClaim references one
entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name
of one entry in pod.spec.resourceClaims
of the Pod where this field is used.
It makes that resource available inside
a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum
amount of compute resources required. If Requests
is omitted for a container, it defaults to
Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes
to consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the
StorageClass required by the claim. More info:
https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume
is required by the claim. Value of Filesystem
is implied when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference
to the PersistentVolume backing this claim.
type: string
type: object
required:
- spec
type: object
type: object
fc:
description: fc represents a Fibre Channel resource that is
attached to a kubelet's host machine and then exposed to the
pod.
properties:
fsType:
description: 'fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. TODO: how do we prevent errors in the
filesystem from compromising the machine'
type: string
lun:
description: 'lun is Optional: FC target lun number'
format: int32
type: integer
readOnly:
description: 'readOnly is Optional: Defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.'
type: boolean
targetWWNs:
description: 'targetWWNs is Optional: FC target worldwide
names (WWNs)'
items:
type: string
type: array
wwids:
description: 'wwids Optional: FC volume world wide identifiers
(wwids) Either wwids or combination of targetWWNs and
lun must be set, but not both simultaneously.'
items:
type: string
type: array
type: object
flexVolume:
description: flexVolume represents a generic volume resource
that is provisioned/attached using an exec based plugin.
properties:
driver:
description: driver is the name of the driver to use for
this volume.
type: string
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". The default filesystem depends
on FlexVolume script.
type: string
options:
additionalProperties:
type: string
description: 'options is Optional: this field holds extra
command options if any.'
type: object
readOnly:
description: 'readOnly is Optional: defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.'
type: boolean
secretRef:
description: 'secretRef is Optional: secretRef is reference
to the secret object containing sensitive information
to pass to the plugin scripts. This may be empty if no
secret object is specified. If the secret object contains
more than one secret, all secrets are passed to the plugin
scripts.'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
required:
- driver
type: object
flocker:
description: flocker represents a Flocker volume attached to
a kubelet's host machine. This depends on the Flocker control
service being running
properties:
datasetName:
description: datasetName is Name of the dataset stored as
metadata -> name on the dataset for Flocker should be
considered as deprecated
type: string
datasetUUID:
description: datasetUUID is the UUID of the dataset. This
is unique identifier of a Flocker dataset
type: string
type: object
gcePersistentDisk:
description: 'gcePersistentDisk represents a GCE Disk resource
that is attached to a kubelet''s host machine and then exposed
to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
properties:
fsType:
description: 'fsType is filesystem type of the volume that
you want to mount. Tip: Ensure that the filesystem type
is supported by the host operating system. Examples: "ext4",
"xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
partition:
description: 'partition is the partition in the volume that
you want to mount. If omitted, the default is to mount
by volume name. Examples: For volume /dev/sda1, you specify
the partition as "1". Similarly, the volume partition
for /dev/sda is "0" (or you can leave the property empty).
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
format: int32
type: integer
pdName:
description: 'pdName is unique name of the PD resource in
GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
type: string
readOnly:
description: 'readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
type: boolean
required:
- pdName
type: object
gitRepo:
description: 'gitRepo represents a git repository at a particular
revision. DEPRECATED: GitRepo is deprecated. To provision
a container with a git repo, mount an EmptyDir into an InitContainer
that clones the repo using git, then mount the EmptyDir into
the Pod''s container.'
properties:
directory:
description: directory is the target directory name. Must
not contain or start with '..'. If '.' is supplied, the
volume directory will be the git repository. Otherwise,
if specified, the volume will contain the git repository
in the subdirectory with the given name.
type: string
repository:
description: repository is the URL
type: string
revision:
description: revision is the commit hash for the specified
revision.
type: string
required:
- repository
type: object
glusterfs:
description: 'glusterfs represents a Glusterfs mount on the
host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md'
properties:
endpoints:
description: 'endpoints is the endpoint name that details
Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: string
path:
description: 'path is the Glusterfs volume path. More info:
https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: string
readOnly:
description: 'readOnly here will force the Glusterfs volume
to be mounted with read-only permissions. Defaults to
false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: boolean
required:
- endpoints
- path
type: object
hostPath:
description: 'hostPath represents a pre-existing file or directory
on the host machine that is directly exposed to the container.
This is generally used for system agents or other privileged
things that are allowed to see the host machine. Most containers
will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
--- TODO(jonesdl) We need to restrict who can use host directory
mounts and who can/can not mount host directories as read/write.'
properties:
path:
description: 'path of the directory on the host. If the
path is a symlink, it will follow the link to the real
path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
type: string
type:
description: 'type for HostPath Volume Defaults to "" More
info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
type: string
required:
- path
type: object
iscsi:
description: 'iscsi represents an ISCSI Disk resource that is
attached to a kubelet''s host machine and then exposed to
the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md'
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether support iSCSI
Discovery CHAP authentication
type: boolean
chapAuthSession:
description: chapAuthSession defines whether support iSCSI
Session CHAP authentication
type: boolean
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
initiatorName:
description: initiatorName is the custom iSCSI Initiator
Name. If initiatorName is specified with iscsiInterface
simultaneously, new iSCSI interface : will be created for the connection.
type: string
iqn:
description: iqn is the target iSCSI Qualified Name.
type: string
iscsiInterface:
description: iscsiInterface is the interface Name that uses
an iSCSI transport. Defaults to 'default' (tcp).
type: string
lun:
description: lun represents iSCSI Target Lun number.
format: int32
type: integer
portals:
description: portals is the iSCSI Target Portal List. The
portal is either an IP or ip_addr:port if the port is
other than default (typically TCP ports 860 and 3260).
items:
type: string
type: array
readOnly:
description: readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false.
type: boolean
secretRef:
description: secretRef is the CHAP Secret for iSCSI target
and initiator authentication
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
targetPortal:
description: targetPortal is iSCSI Target Portal. The Portal
is either an IP or ip_addr:port if the port is other than
default (typically TCP ports 860 and 3260).
type: string
required:
- iqn
- lun
- targetPortal
type: object
name:
description: 'name of the volume. Must be a DNS_LABEL and unique
within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
nfs:
description: 'nfs represents an NFS mount on the host that shares
a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
properties:
path:
description: 'path that is exported by the NFS server. More
info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: string
readOnly:
description: 'readOnly here will force the NFS export to
be mounted with read-only permissions. Defaults to false.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: boolean
server:
description: 'server is the hostname or IP address of the
NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: string
required:
- path
- server
type: object
persistentVolumeClaim:
description: 'persistentVolumeClaimVolumeSource represents a
reference to a PersistentVolumeClaim in the same namespace.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
properties:
claimName:
description: 'claimName is the name of a PersistentVolumeClaim
in the same namespace as the pod using this volume. More
info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
type: string
readOnly:
description: readOnly Will force the ReadOnly setting in
VolumeMounts. Default false.
type: boolean
required:
- claimName
type: object
photonPersistentDisk:
description: photonPersistentDisk represents a PhotonController
persistent disk attached and mounted on kubelets host machine
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
pdID:
description: pdID is the ID that identifies Photon Controller
persistent disk
type: string
required:
- pdID
type: object
portworxVolume:
description: portworxVolume represents a portworx volume attached
and mounted on kubelets host machine
properties:
fsType:
description: fSType represents the filesystem type to mount
Must be a filesystem type supported by the host operating
system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
volumeID:
description: volumeID uniquely identifies a Portworx volume
type: string
required:
- volumeID
type: object
projected:
description: projected items for all in one resources secrets,
configmaps, and downward API
properties:
defaultMode:
description: defaultMode are the mode bits used to set permissions
on created files by default. Must be an octal value between
0000 and 0777 or a decimal value between 0 and 511. YAML
accepts both octal and decimal values, JSON requires decimal
values for mode bits. Directories within the path are
not affected by this setting. This might be in conflict
with other options that affect the file mode, like fsGroup,
and the result can be other mode bits set.
format: int32
type: integer
sources:
description: sources is the list of volume projections
items:
description: Projection that may be projected along with
other supported volume types
properties:
configMap:
description: configMap information about the configMap
data to project
properties:
items:
description: items if unspecified, each key-value
pair in the Data field of the referenced ConfigMap
will be projected into the volume as a file
whose name is the key and content is the value.
If specified, the listed keys will be projected
into the specified paths, and unlisted keys
will not be present. If a key is specified which
is not present in the ConfigMap, the volume
setup will error unless it is marked optional.
Paths must be relative and may not contain the
'..' path or start with '..'.
items:
description: Maps a string key to a path within
a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits
used to set permissions on this file.
Must be an octal value between 0000 and
0777 or a decimal value between 0 and
511. YAML accepts both octal and decimal
values, JSON requires decimal values for
mode bits. If not specified, the volume
defaultMode will be used. This might be
in conflict with other options that affect
the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of
the file to map the key to. May not be
an absolute path. May not contain the
path element '..'. May not start with
the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: optional specify whether the ConfigMap
or its keys must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
downwardAPI:
description: downwardAPI information about the downwardAPI
data to project
properties:
items:
description: Items is a list of DownwardAPIVolume
file
items:
description: DownwardAPIVolumeFile represents
information to create the file containing
the pod field
properties:
fieldRef:
description: 'Required: Selects a field
of the pod: only annotations, labels,
name and namespace are supported.'
properties:
apiVersion:
description: Version of the schema the
FieldPath is written in terms of,
defaults to "v1".
type: string
fieldPath:
description: Path of the field to select
in the specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
mode:
description: 'Optional: mode bits used to
set permissions on this file, must be
an octal value between 0000 and 0777 or
a decimal value between 0 and 511. YAML
accepts both octal and decimal values,
JSON requires decimal values for mode
bits. If not specified, the volume defaultMode
will be used. This might be in conflict
with other options that affect the file
mode, like fsGroup, and the result can
be other mode bits set.'
format: int32
type: integer
path:
description: 'Required: Path is the relative
path name of the file to be created. Must
not be absolute or contain the ''..''
path. Must be utf-8 encoded. The first
item of the relative path must not start
with ''..'''
type: string
resourceFieldRef:
description: 'Selects a resource of the
container: only resources limits and requests
(limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
properties:
containerName:
description: 'Container name: required
for volumes, optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format
of the exposed resources, defaults
to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to
select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
required:
- path
type: object
type: array
type: object
secret:
description: secret information about the secret data
to project
properties:
items:
description: items if unspecified, each key-value
pair in the Data field of the referenced Secret
will be projected into the volume as a file
whose name is the key and content is the value.
If specified, the listed keys will be projected
into the specified paths, and unlisted keys
will not be present. If a key is specified which
is not present in the Secret, the volume setup
will error unless it is marked optional. Paths
must be relative and may not contain the '..'
path or start with '..'.
items:
description: Maps a string key to a path within
a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits
used to set permissions on this file.
Must be an octal value between 0000 and
0777 or a decimal value between 0 and
511. YAML accepts both octal and decimal
values, JSON requires decimal values for
mode bits. If not specified, the volume
defaultMode will be used. This might be
in conflict with other options that affect
the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of
the file to map the key to. May not be
an absolute path. May not contain the
path element '..'. May not start with
the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: optional field specify whether the
Secret or its key must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
serviceAccountToken:
description: serviceAccountToken is information about
the serviceAccountToken data to project
properties:
audience:
description: audience is the intended audience
of the token. A recipient of a token must identify
itself with an identifier specified in the audience
of the token, and otherwise should reject the
token. The audience defaults to the identifier
of the apiserver.
type: string
expirationSeconds:
description: expirationSeconds is the requested
duration of validity of the service account
token. As the token approaches expiration, the
kubelet volume plugin will proactively rotate
the service account token. The kubelet will
start trying to rotate the token if the token
is older than 80 percent of its time to live
or if the token is older than 24 hours.Defaults
to 1 hour and must be at least 10 minutes.
format: int64
type: integer
path:
description: path is the path relative to the
mount point of the file to project the token
into.
type: string
required:
- path
type: object
type: object
type: array
type: object
quobyte:
description: quobyte represents a Quobyte mount on the host
that shares a pod's lifetime
properties:
group:
description: group to map volume access to Default is no
group
type: string
readOnly:
description: readOnly here will force the Quobyte volume
to be mounted with read-only permissions. Defaults to
false.
type: boolean
registry:
description: registry represents a single or multiple Quobyte
Registry services specified as a string as host:port pair
(multiple entries are separated with commas) which acts
as the central registry for volumes
type: string
tenant:
description: tenant owning the given Quobyte volume in the
Backend Used with dynamically provisioned Quobyte volumes,
value is set by the plugin
type: string
user:
description: user to map volume access to Defaults to serivceaccount
user
type: string
volume:
description: volume is a string that references an already
created Quobyte volume by name.
type: string
required:
- registry
- volume
type: object
rbd:
description: 'rbd represents a Rados Block Device mount on the
host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md'
properties:
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
image:
description: 'image is the rados image name. More info:
https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
keyring:
description: 'keyring is the path to key ring for RBDUser.
Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
monitors:
description: 'monitors is a collection of Ceph monitors.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
items:
type: string
type: array
pool:
description: 'pool is the rados pool name. Default is rbd.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
readOnly:
description: 'readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: boolean
secretRef:
description: 'secretRef is name of the authentication secret
for RBDUser. If provided overrides keyring. Default is
nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
user:
description: 'user is the rados user name. Default is admin.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
required:
- image
- monitors
type: object
scaleIO:
description: scaleIO represents a ScaleIO persistent volume
attached and mounted on Kubernetes nodes.
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Default is "xfs".
type: string
gateway:
description: gateway is the host address of the ScaleIO
API Gateway.
type: string
protectionDomain:
description: protectionDomain is the name of the ScaleIO
Protection Domain for the configured storage.
type: string
readOnly:
description: readOnly Defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretRef:
description: secretRef references to the secret for ScaleIO
user and other sensitive information. If this is not provided,
Login operation will fail.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
sslEnabled:
description: sslEnabled Flag enable/disable SSL communication
with Gateway, default false
type: boolean
storageMode:
description: storageMode indicates whether the storage for
a volume should be ThickProvisioned or ThinProvisioned.
Default is ThinProvisioned.
type: string
storagePool:
description: storagePool is the ScaleIO Storage Pool associated
with the protection domain.
type: string
system:
description: system is the name of the storage system as
configured in ScaleIO.
type: string
volumeName:
description: volumeName is the name of a volume already
created in the ScaleIO system that is associated with
this volume source.
type: string
required:
- gateway
- secretRef
- system
type: object
secret:
description: 'secret represents a secret that should populate
this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'
properties:
defaultMode:
description: 'defaultMode is Optional: mode bits used to
set permissions on created files by default. Must be an
octal value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: items If unspecified, each key-value pair in
the Data field of the referenced Secret will be projected
into the volume as a file whose name is the key and content
is the value. If specified, the listed keys will be projected
into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in
the Secret, the volume setup will error unless it is marked
optional. Paths must be relative and may not contain the
'..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits used to
set permissions on this file. Must be an octal value
between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. If not
specified, the volume defaultMode will be used.
This might be in conflict with other options that
affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of the file
to map the key to. May not be an absolute path.
May not contain the path element '..'. May not start
with the string '..'.
type: string
required:
- key
- path
type: object
type: array
optional:
description: optional field specify whether the Secret or
its keys must be defined
type: boolean
secretName:
description: 'secretName is the name of the secret in the
pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'
type: string
type: object
storageos:
description: storageOS represents a StorageOS volume attached
and mounted on Kubernetes nodes.
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretRef:
description: secretRef specifies the secret to use for obtaining
the StorageOS API credentials. If not specified, default
values will be attempted.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
volumeName:
description: volumeName is the human-readable name of the
StorageOS volume. Volume names are only unique within
a namespace.
type: string
volumeNamespace:
description: volumeNamespace specifies the scope of the
volume within StorageOS. If no namespace is specified
then the Pod's namespace will be used. This allows the
Kubernetes name scoping to be mirrored within StorageOS
for tighter integration. Set VolumeName to any name to
override the default behaviour. Set to "default" if you
are not using namespaces within StorageOS. Namespaces
that do not pre-exist within StorageOS will be created.
type: string
type: object
vsphereVolume:
description: vsphereVolume represents a vSphere volume attached
and mounted on kubelets host machine
properties:
fsType:
description: fsType is filesystem type to mount. Must be
a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
storagePolicyID:
description: storagePolicyID is the storage Policy Based
Management (SPBM) profile ID associated with the StoragePolicyName.
type: string
storagePolicyName:
description: storagePolicyName is the storage Policy Based
Management (SPBM) profile name.
type: string
volumePath:
description: volumePath is the path that identifies vSphere
volume vmdk
type: string
required:
- volumePath
type: object
required:
- name
type: object
type: array
walCompression:
description: "Configures compression of the write-ahead log (WAL)
using Snappy. \n WAL compression is enabled by default for Prometheus
>= 2.20.0 \n Requires Prometheus v2.11.0 and above."
type: boolean
web:
description: Defines the configuration of the Prometheus web server.
properties:
httpConfig:
description: Defines HTTP parameters for web server.
properties:
headers:
description: List of headers that can be added to HTTP responses.
properties:
contentSecurityPolicy:
description: Set the Content-Security-Policy header to
HTTP responses. Unset if blank.
type: string
strictTransportSecurity:
description: Set the Strict-Transport-Security header
to HTTP responses. Unset if blank. Please make sure
that you use this with care as this header might force
browsers to load Prometheus and the other applications
hosted on the same domain and subdomains over HTTPS.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
type: string
xContentTypeOptions:
description: Set the X-Content-Type-Options header to
HTTP responses. Unset if blank. Accepted value is nosniff.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
enum:
- ""
- NoSniff
type: string
xFrameOptions:
description: Set the X-Frame-Options header to HTTP responses.
Unset if blank. Accepted values are deny and sameorigin.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
enum:
- ""
- Deny
- SameOrigin
type: string
xXSSProtection:
description: Set the X-XSS-Protection header to all responses.
Unset if blank. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
type: string
type: object
http2:
description: Enable HTTP/2 support. Note that HTTP/2 is only
supported with TLS. When TLSConfig is not configured, HTTP/2
will be disabled. Whenever the value of the field changes,
a rolling update will be triggered.
type: boolean
type: object
maxConnections:
description: Defines the maximum number of simultaneous connections.
A zero value means that Prometheus doesn't accept any incoming
connection.
format: int32
minimum: 0
type: integer
pageTitle:
description: The Prometheus web page title.
type: string
tlsConfig:
description: Defines the TLS parameters for HTTPS.
properties:
cert:
description: Contains the TLS certificate for the server.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cipherSuites:
description: 'List of supported cipher suites for TLS versions
up to TLS 1.2. If empty, Go default cipher suites are used.
Available cipher suites are documented in the go documentation:
https://golang.org/pkg/crypto/tls/#pkg-constants'
items:
type: string
type: array
client_ca:
description: Contains the CA certificate for client certificate
authentication to the server.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientAuthType:
description: 'Server policy for client authentication. Maps
to ClientAuth Policies. For more detail on clientAuth options:
https://golang.org/pkg/crypto/tls/#ClientAuthType'
type: string
curvePreferences:
description: 'Elliptic curves that will be used in an ECDHE
handshake, in preference order. Available curves are documented
in the go documentation: https://golang.org/pkg/crypto/tls/#CurveID'
items:
type: string
type: array
keySecret:
description: Secret containing the TLS key for the server.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
maxVersion:
description: Maximum TLS version that is acceptable. Defaults
to TLS13.
type: string
minVersion:
description: Minimum TLS version that is acceptable. Defaults
to TLS12.
type: string
preferServerCipherSuites:
description: Controls whether the server selects the client's
most preferred cipher suite, or the server's most preferred
cipher suite. If true then the server's preference, as expressed
in the order of elements in cipherSuites, is used.
type: boolean
required:
- cert
- keySecret
type: object
type: object
type: object
status:
description: 'Most recent observed status of the Prometheus cluster. Read-only.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
properties:
availableReplicas:
description: Total number of available pods (ready for at least minReadySeconds)
targeted by this Prometheus deployment.
format: int32
type: integer
conditions:
description: The current state of the Prometheus deployment.
items:
description: Condition represents the state of the resources associated
with the Prometheus, Alertmanager or ThanosRuler resource.
properties:
lastTransitionTime:
description: lastTransitionTime is the time of the last update
to the current status property.
format: date-time
type: string
message:
description: Human-readable message indicating details for the
condition's last transition.
type: string
observedGeneration:
description: ObservedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if `.metadata.generation`
is currently 12, but the `.status.conditions[].observedGeneration`
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
type: integer
reason:
description: Reason for the condition's last transition.
type: string
status:
description: Status of the condition.
type: string
type:
description: Type of the condition being reported.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
paused:
description: Represents whether any actions on the underlying managed
objects are being performed. Only delete actions will be performed.
type: boolean
replicas:
description: Total number of non-terminated pods targeted by this
Prometheus deployment (their labels match the selector).
format: int32
type: integer
selector:
description: The selector used to match the pods targeted by this
Prometheus resource.
type: string
shardStatuses:
description: The list has one entry per shard. Each entry provides
a summary of the shard status.
items:
properties:
availableReplicas:
description: Total number of available pods (ready for at least
minReadySeconds) targeted by this shard.
format: int32
type: integer
replicas:
description: Total number of pods targeted by this shard.
format: int32
type: integer
shardID:
description: Identifier of the shard.
type: string
unavailableReplicas:
description: Total number of unavailable pods targeted by this
shard.
format: int32
type: integer
updatedReplicas:
description: Total number of non-terminated pods targeted by
this shard that have the desired spec.
format: int32
type: integer
required:
- availableReplicas
- replicas
- shardID
- unavailableReplicas
- updatedReplicas
type: object
type: array
x-kubernetes-list-map-keys:
- shardID
x-kubernetes-list-type: map
shards:
description: Shards is the most recently observed number of shards.
format: int32
type: integer
unavailableReplicas:
description: Total number of unavailable pods targeted by this Prometheus
deployment.
format: int32
type: integer
updatedReplicas:
description: Total number of non-terminated pods targeted by this
Prometheus deployment that have the desired version spec.
format: int32
type: integer
required:
- availableReplicas
- paused
- replicas
- unavailableReplicas
- updatedReplicas
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
scale:
labelSelectorPath: .status.selector
specReplicasPath: .spec.shards
statusReplicasPath: .status.shards
status: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: prometheusrules.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: PrometheusRule
listKind: PrometheusRuleList
plural: prometheusrules
shortNames:
- promrule
singular: prometheusrule
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: PrometheusRule defines recording and alerting rules for a Prometheus
instance
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired alerting rule definitions for Prometheus.
properties:
groups:
description: Content of Prometheus rule file
items:
description: RuleGroup is a list of sequentially evaluated recording
and alerting rules.
properties:
interval:
description: Interval determines how often rules in the group
are evaluated.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
limit:
description: Limit the number of alerts an alerting rule and
series a recording rule can produce. Limit is supported starting
with Prometheus >= 2.31 and Thanos Ruler >= 0.24.
type: integer
name:
description: Name of the rule group.
minLength: 1
type: string
partial_response_strategy:
description: 'PartialResponseStrategy is only used by ThanosRuler
and will be ignored by Prometheus instances. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response'
pattern: ^(?i)(abort|warn)?$
type: string
rules:
description: List of alerting and recording rules.
items:
description: 'Rule describes an alerting or recording rule
See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules)
rule'
properties:
alert:
description: Name of the alert. Must be a valid label
value. Only one of `record` and `alert` must be set.
type: string
annotations:
additionalProperties:
type: string
description: Annotations to add to each alert. Only valid
for alerting rules.
type: object
expr:
anyOf:
- type: integer
- type: string
description: PromQL expression to evaluate.
x-kubernetes-int-or-string: true
for:
description: Alerts are considered firing once they have
been returned for this long.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
keep_firing_for:
description: KeepFiringFor defines how long an alert will
continue firing after the condition that triggered it
has cleared.
minLength: 1
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
labels:
additionalProperties:
type: string
description: Labels to add or overwrite.
type: object
record:
description: Name of the time series to output to. Must
be a valid metric name. Only one of `record` and `alert`
must be set.
type: string
required:
- expr
type: object
type: array
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
required:
- spec
type: object
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: podmonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: PodMonitor
listKind: PodMonitorList
plural: podmonitors
shortNames:
- pmon
singular: podmonitor
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: PodMonitor defines monitoring for a set of pods.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Pod selection for target discovery
by Prometheus.
properties:
attachMetadata:
description: "`attachMetadata` defines additional metadata which is
added to the discovered targets. \n It requires Prometheus >= v2.37.0."
properties:
node:
description: When set to true, Prometheus must have the `get`
permission on the `Nodes` objects.
type: boolean
type: object
jobLabel:
description: "The label to use to retrieve the job name from. `jobLabel`
selects the label from the associated Kubernetes `Pod` object which
will be used as the `job` label for all metrics. \n For example
if `jobLabel` is set to `foo` and the Kubernetes `Pod` object is
labeled with `foo: bar`, then Prometheus adds the `job=\"bar\"`
label to all ingested metrics. \n If the value of this field is
empty, the `job` label of the metrics defaults to the namespace
and name of the PodMonitor object (e.g. `/`)."
type: string
keepDroppedTargets:
description: "Per-scrape limit on the number of targets dropped by
relabeling that will be kept in memory. 0 means no limit. \n It
requires Prometheus >= v2.47.0."
format: int64
type: integer
labelLimit:
description: "Per-scrape limit on number of labels that will be accepted
for a sample. \n It requires Prometheus >= v2.27.0."
format: int64
type: integer
labelNameLengthLimit:
description: "Per-scrape limit on length of labels name that will
be accepted for a sample. \n It requires Prometheus >= v2.27.0."
format: int64
type: integer
labelValueLengthLimit:
description: "Per-scrape limit on length of labels value that will
be accepted for a sample. \n It requires Prometheus >= v2.27.0."
format: int64
type: integer
namespaceSelector:
description: Selector to select which namespaces the Kubernetes `Pods`
objects are discovered from.
properties:
any:
description: Boolean describing whether all namespaces are selected
in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names to select from.
items:
type: string
type: array
type: object
podMetricsEndpoints:
description: List of endpoints part of this PodMonitor.
items:
description: PodMetricsEndpoint defines an endpoint serving Prometheus
metrics to be scraped by Prometheus.
properties:
authorization:
description: "`authorization` configures the Authorization header
credentials to use when scraping the target. \n Cannot be
set at the same time as `basicAuth`, or `oauth2`."
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: "`basicAuth` configures the Basic Authentication
credentials to use when scraping the target. \n Cannot be
set at the same time as `authorization`, or `oauth2`."
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenSecret:
description: "`bearerTokenSecret` specifies a key of a Secret
containing the bearer token for scraping targets. The secret
needs to be in the same namespace as the PodMonitor object
and readable by the Prometheus Operator. \n Deprecated: use
`authorization` instead."
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
enableHttp2:
description: '`enableHttp2` can be used to disable HTTP2 when
scraping the target.'
type: boolean
filterRunning:
description: "When true, the pods which are not running (e.g.
either in Failed or Succeeded state) are dropped during the
target discovery. \n If unset, the filtering is enabled. \n
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase"
type: boolean
followRedirects:
description: '`followRedirects` defines whether the scrape requests
should follow HTTP 3xx redirects.'
type: boolean
honorLabels:
description: When true, `honorLabels` preserves the metric's
labels when they collide with the target's labels.
type: boolean
honorTimestamps:
description: '`honorTimestamps` controls whether Prometheus
preserves the timestamps when exposed by the target.'
type: boolean
interval:
description: "Interval at which Prometheus scrapes the metrics
from the target. \n If empty, Prometheus uses the global scrape
interval."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
metricRelabelings:
description: '`metricRelabelings` configures the relabeling
rules to apply to the samples before ingestion.'
items:
description: "RelabelConfig allows dynamic rewriting of the
label set for targets, alerts, scraped samples and remote
write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label name
which may only contain ASCII letters, numbers, as
well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
oauth2:
description: "`oauth2` configures the OAuth2 settings to use
when scraping the target. \n It requires Prometheus >= 2.27.0.
\n Cannot be set at the same time as `authorization`, or `basicAuth`."
properties:
clientId:
description: '`clientId` specifies a key of a Secret or
ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for
the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the
token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
params:
additionalProperties:
items:
type: string
type: array
description: '`params` define optional HTTP URL parameters.'
type: object
path:
description: "HTTP path from which to scrape for metrics. \n
If empty, Prometheus uses the default value (e.g. `/metrics`)."
type: string
port:
description: "Name of the Pod port which this endpoint refers
to. \n It takes precedence over `targetPort`."
type: string
proxyUrl:
description: '`proxyURL` configures the HTTP Proxy URL (e.g.
"http://proxyserver:2195") to go through when scraping the
target.'
type: string
relabelings:
description: "`relabelings` configures the relabeling rules
to apply the target's metadata labels. \n The Operator automatically
adds relabelings for a few standard Kubernetes fields. \n
The original scrape job's name is available via the `__tmp_prometheus_job_name`
label. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
items:
description: "RelabelConfig allows dynamic rewriting of the
label set for targets, alerts, scraped samples and remote
write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label name
which may only contain ASCII letters, numbers, as
well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
scheme:
description: "HTTP scheme to use for scraping. \n `http` and
`https` are the expected values unless you rewrite the `__scheme__`
label via relabeling. \n If empty, Prometheus uses the default
value `http`."
enum:
- http
- https
type: string
scrapeTimeout:
description: "Timeout after which Prometheus considers the scrape
to be failed. \n If empty, Prometheus uses the global scrape
timeout unless it is less than the target's scrape interval
value in which the latter is used."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
targetPort:
anyOf:
- type: integer
- type: string
description: "Name or number of the target port of the `Pod`
object behind the Service, the port must be specified with
container port property. \n Deprecated: use 'port' instead."
x-kubernetes-int-or-string: true
tlsConfig:
description: TLS configuration to use when scraping the target.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
trackTimestampsStaleness:
description: "`trackTimestampsStaleness` defines whether Prometheus
tracks staleness of the metrics that have an explicit timestamp
present in scraped data. Has no effect if `honorTimestamps`
is false. \n It requires Prometheus >= v2.48.0."
type: boolean
type: object
type: array
podTargetLabels:
description: '`podTargetLabels` defines the labels which are transferred
from the associated Kubernetes `Pod` object onto the ingested metrics.'
items:
type: string
type: array
sampleLimit:
description: '`sampleLimit` defines a per-scrape limit on the number
of scraped samples that will be accepted.'
format: int64
type: integer
selector:
description: Label selector to select the Kubernetes `Pod` objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
targetLimit:
description: '`targetLimit` defines a limit on the number of scraped
targets that will be accepted.'
format: int64
type: integer
required:
- selector
type: object
required:
- spec
type: object
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: scrapeconfigs.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: ScrapeConfig
listKind: ScrapeConfigList
plural: scrapeconfigs
shortNames:
- scfg
singular: scrapeconfig
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: ScrapeConfig defines a namespaced Prometheus scrape_config to
be aggregated across multiple namespaces into the Prometheus configuration.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ScrapeConfigSpec is a specification of the desired configuration
for a scrape configuration.
properties:
authorization:
description: Authorization header to use on every scrape request.
properties:
credentials:
description: Selects a key of a Secret in the namespace that contains
the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value is case-insensitive.
\n \"Basic\" is not a supported value. \n Default: \"Bearer\""
type: string
type: object
azureSDConfigs:
description: AzureSDConfigs defines a list of Azure service discovery
configurations.
items:
description: AzureSDConfig allow retrieving scrape targets from
Azure VMs. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#azure_sd_config
properties:
authenticationMethod:
description: '# The authentication method, either OAuth or ManagedIdentity.
See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview'
enum:
- OAuth
- ManagedIdentity
type: string
clientID:
description: Optional client ID. Only required with the OAuth
authentication method.
type: string
clientSecret:
description: Optional client secret. Only required with the
OAuth authentication method.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
environment:
description: The Azure environment.
type: string
port:
description: The port to scrape metrics from. If using the public
IP address, this must instead be specified in the relabeling
rule.
type: integer
refreshInterval:
description: RefreshInterval configures the refresh interval
at which Prometheus will re-read the instance list.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
resourceGroup:
description: Optional resource group name. Limits discovery
to this resource group.
type: string
subscriptionID:
description: The subscription ID. Always required.
minLength: 1
type: string
tenantID:
description: Optional tenant ID. Only required with the OAuth
authentication method.
type: string
required:
- subscriptionID
type: object
type: array
basicAuth:
description: BasicAuth information to use on every scrape request.
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
consulSDConfigs:
description: ConsulSDConfigs defines a list of Consul service discovery
configurations.
items:
description: ConsulSDConfig defines a Consul service discovery configuration
See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
properties:
allowStale:
description: Allow stale Consul results (see https://www.consul.io/api/features/consistency.html).
Will reduce load on Consul. If unset, Prometheus uses its
default value.
type: boolean
authorization:
description: Authorization header configuration to authenticate
against the Consul Server.
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: 'BasicAuth information to authenticate against
the Consul Server. More info: https://prometheus.io/docs/operating/configuration/#endpoints'
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
datacenter:
description: Consul Datacenter name, if not provided it will
use the local Consul Agent Datacenter.
type: string
enableHTTP2:
description: Whether to enable HTTP2. If unset, Prometheus uses
its default value.
type: boolean
followRedirects:
description: Configure whether HTTP requests follow HTTP 3xx
redirects. If unset, Prometheus uses its default value.
type: boolean
namespace:
description: Namespaces are only supported in Consul Enterprise.
type: string
noProxy:
description: "`noProxy` is a comma-separated string that can
contain IPs, CIDR notation, domain names that should be excluded
from proxying. IP and domain names can contain port numbers.
\n It requires Prometheus >= v2.43.0."
type: string
nodeMeta:
additionalProperties:
type: string
description: Node metadata key/value pairs to filter nodes for
a given service.
type: object
x-kubernetes-map-type: atomic
oauth2:
description: Optional OAuth 2.0 configuration.
properties:
clientId:
description: '`clientId` specifies a key of a Secret or
ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for
the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the
token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
partition:
description: Admin Partitions are only supported in Consul Enterprise.
type: string
proxyConnectHeader:
additionalProperties:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
description: "ProxyConnectHeader optionally specifies headers
to send to proxies during CONNECT requests. \n It requires
Prometheus >= v2.43.0."
type: object
x-kubernetes-map-type: atomic
proxyFromEnvironment:
description: "Whether to use the proxy configuration defined
by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY).
If unset, Prometheus uses its default value. \n It requires
Prometheus >= v2.43.0."
type: boolean
proxyUrl:
description: "`proxyURL` defines the HTTP proxy server to use.
\n It requires Prometheus >= v2.43.0."
pattern: ^http(s)?://.+$
type: string
refreshInterval:
description: The time after which the provided names are refreshed.
On large setup it might be a good idea to increase this value
because the catalog will change all the time. If unset, Prometheus
uses its default value.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
scheme:
description: HTTP Scheme default "http"
enum:
- HTTP
- HTTPS
type: string
server:
description: A valid string consisting of a hostname or IP followed
by an optional port number.
minLength: 1
type: string
services:
description: A list of services for which targets are retrieved.
If omitted, all services are scraped.
items:
type: string
type: array
x-kubernetes-list-type: atomic
tagSeparator:
description: The string by which Consul tags are joined into
the tag label. If unset, Prometheus uses its default value.
type: string
tags:
description: An optional list of tags used to filter nodes for
a given service. Services must contain all tags in the list.
items:
type: string
type: array
x-kubernetes-list-type: atomic
tlsConfig:
description: TLS Config
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
tokenRef:
description: Consul ACL TokenRef, if not provided it will use
the ACL from the local Consul Agent.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
required:
- server
type: object
type: array
dnsSDConfigs:
description: DNSSDConfigs defines a list of DNS service discovery
configurations.
items:
description: DNSSDConfig allows specifying a set of DNS domain names
which are periodically queried to discover a list of targets.
The DNS servers to be contacted are read from /etc/resolv.conf.
See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config
properties:
names:
description: A list of DNS domain names to be queried.
items:
type: string
minItems: 1
type: array
port:
description: The port number used if the query type is not SRV
Ignored for SRV records
type: integer
refreshInterval:
description: RefreshInterval configures the time after which
the provided names are refreshed. If not set, Prometheus uses
its default value.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
type:
description: The type of DNS query to perform. One of SRV, A,
AAAA or MX. If not set, Prometheus uses its default value.
enum:
- SRV
- A
- AAAA
- MX
type: string
required:
- names
type: object
type: array
ec2SDConfigs:
description: EC2SDConfigs defines a list of EC2 service discovery
configurations.
items:
description: EC2SDConfig allow retrieving scrape targets from AWS
EC2 instances. The private IP address is used by default, but
may be changed to the public IP address with relabeling. The IAM
credentials used must have the ec2:DescribeInstances permission
to discover scrape targets See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config
properties:
accessKey:
description: AccessKey is the AWS API key.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
filters:
description: 'Filters can be used optionally to filter the instance
list by other criteria. Available filter criteria can be found
here: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html
Filter API documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Filter.html'
items:
description: EC2Filter is the configuration for filtering
EC2 instances.
properties:
name:
type: string
values:
items:
type: string
type: array
required:
- name
- values
type: object
type: array
port:
description: The port to scrape metrics from. If using the public
IP address, this must instead be specified in the relabeling
rule.
type: integer
refreshInterval:
description: RefreshInterval configures the refresh interval
at which Prometheus will re-read the instance list.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
region:
description: The AWS region
type: string
roleARN:
description: AWS Role ARN, an alternative to using AWS API keys.
type: string
secretKey:
description: SecretKey is the AWS API secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
type: array
fileSDConfigs:
description: FileSDConfigs defines a list of file service discovery
configurations.
items:
description: FileSDConfig defines a Prometheus file service discovery
configuration See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config
properties:
files:
description: 'List of files to be used for file discovery. Recommendation:
use absolute paths. While relative paths work, the prometheus-operator
project makes no guarantees about the working directory where
the configuration file is stored. Files must be mounted using
Prometheus.ConfigMaps or Prometheus.Secrets.'
items:
description: SDFile represents a file used for service discovery
pattern: ^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$
type: string
minItems: 1
type: array
refreshInterval:
description: RefreshInterval configures the refresh interval
at which Prometheus will reload the content of the files.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
required:
- files
type: object
type: array
gceSDConfigs:
description: GCESDConfigs defines a list of GCE service discovery
configurations.
items:
description: "GCESDConfig configures scrape targets from GCP GCE
instances. The private IP address is used by default, but may
be changed to the public IP address with relabeling. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config
\n The GCE service discovery will load the Google Cloud credentials
from the file specified by the GOOGLE_APPLICATION_CREDENTIALS
environment variable. See https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform
\n A pre-requisite for using GCESDConfig is that a Secret containing
valid Google Cloud credentials is mounted into the Prometheus
or PrometheusAgent pod via the `.spec.secrets` field and that
the GOOGLE_APPLICATION_CREDENTIALS environment variable is set
to /etc/prometheus/secrets//."
properties:
filter:
description: 'Filter can be used optionally to filter the instance
list by other criteria Syntax of this filter is described
in the filter query parameter section: https://cloud.google.com/compute/docs/reference/latest/instances/list'
type: string
port:
description: The port to scrape metrics from. If using the public
IP address, this must instead be specified in the relabeling
rule.
type: integer
project:
description: The Google Cloud Project ID
minLength: 1
type: string
refreshInterval:
description: RefreshInterval configures the refresh interval
at which Prometheus will re-read the instance list.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
tagSeparator:
description: The tag separator is used to separate the tags
on concatenation
type: string
zone:
description: The zone of the scrape targets. If you need multiple
zones use multiple GCESDConfigs.
minLength: 1
type: string
required:
- project
- zone
type: object
type: array
honorLabels:
description: HonorLabels chooses the metric's labels on collisions
with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects
the timestamps present in scraped data.
type: boolean
httpSDConfigs:
description: HTTPSDConfigs defines a list of HTTP service discovery
configurations.
items:
description: HTTPSDConfig defines a prometheus HTTP service discovery
configuration See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config
properties:
authorization:
description: Authorization header configuration to authenticate
against the target HTTP endpoint.
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: 'BasicAuth information to authenticate against
the target HTTP endpoint. More info: https://prometheus.io/docs/operating/configuration/#endpoints'
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
noProxy:
description: "`noProxy` is a comma-separated string that can
contain IPs, CIDR notation, domain names that should be excluded
from proxying. IP and domain names can contain port numbers.
\n It requires Prometheus >= v2.43.0."
type: string
proxyConnectHeader:
additionalProperties:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
description: "ProxyConnectHeader optionally specifies headers
to send to proxies during CONNECT requests. \n It requires
Prometheus >= v2.43.0."
type: object
x-kubernetes-map-type: atomic
proxyFromEnvironment:
description: "Whether to use the proxy configuration defined
by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY).
If unset, Prometheus uses its default value. \n It requires
Prometheus >= v2.43.0."
type: boolean
proxyUrl:
description: "`proxyURL` defines the HTTP proxy server to use.
\n It requires Prometheus >= v2.43.0."
pattern: ^http(s)?://.+$
type: string
refreshInterval:
description: RefreshInterval configures the refresh interval
at which Prometheus will re-query the endpoint to update the
target list.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
tlsConfig:
description: TLS configuration applying to the target HTTP endpoint.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
url:
description: URL from which the targets are fetched.
minLength: 1
pattern: ^http(s)?://.+$
type: string
required:
- url
type: object
type: array
keepDroppedTargets:
description: "Per-scrape limit on the number of targets dropped by
relabeling that will be kept in memory. 0 means no limit. \n It
requires Prometheus >= v2.47.0."
format: int64
type: integer
kubernetesSDConfigs:
description: KubernetesSDConfigs defines a list of Kubernetes service
discovery configurations.
items:
description: KubernetesSDConfig allows retrieving scrape targets
from Kubernetes' REST API. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
properties:
apiServer:
description: The API server address consisting of a hostname
or IP address followed by an optional port number. If left
empty, Prometheus is assumed to run inside of the cluster.
It will discover API servers automatically and use the pod's
CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
type: string
attachMetadata:
description: Optional metadata to attach to discovered targets.
It requires Prometheus >= v2.35.0 for `pod` role and Prometheus
>= v2.37.0 for `endpoints` and `endpointslice` roles.
properties:
node:
description: Attaches node metadata to discovered targets.
When set to true, Prometheus must have the `get` permission
on the `Nodes` objects. Only valid for Pod, Endpoint and
Endpointslice roles.
type: boolean
type: object
authorization:
description: Authorization header to use on every scrape request.
Cannot be set at the same time as `basicAuth`, or `oauth2`.
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: BasicAuth information to use on every scrape request.
Cannot be set at the same time as `authorization`, or `oauth2`.
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
enableHTTP2:
description: Whether to enable HTTP2.
type: boolean
followRedirects:
description: Configure whether HTTP requests follow HTTP 3xx
redirects.
type: boolean
namespaces:
description: Optional namespace discovery. If omitted, Prometheus
discovers targets across all namespaces.
properties:
names:
description: List of namespaces where to watch for resources.
If empty and `ownNamespace` isn't true, Prometheus watches
for resources in all namespaces.
items:
type: string
type: array
ownNamespace:
description: Includes the namespace in which the Prometheus
pod exists to the list of watched namespaces.
type: boolean
type: object
noProxy:
description: "`noProxy` is a comma-separated string that can
contain IPs, CIDR notation, domain names that should be excluded
from proxying. IP and domain names can contain port numbers.
\n It requires Prometheus >= v2.43.0."
type: string
oauth2:
description: Optional OAuth 2.0 configuration. Cannot be set
at the same time as `authorization`, or `basicAuth`.
properties:
clientId:
description: '`clientId` specifies a key of a Secret or
ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for
the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the
token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
proxyConnectHeader:
additionalProperties:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
description: "ProxyConnectHeader optionally specifies headers
to send to proxies during CONNECT requests. \n It requires
Prometheus >= v2.43.0."
type: object
x-kubernetes-map-type: atomic
proxyFromEnvironment:
description: "Whether to use the proxy configuration defined
by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY).
If unset, Prometheus uses its default value. \n It requires
Prometheus >= v2.43.0."
type: boolean
proxyUrl:
description: "`proxyURL` defines the HTTP proxy server to use.
\n It requires Prometheus >= v2.43.0."
pattern: ^http(s)?://.+$
type: string
role:
description: Role of the Kubernetes entities that should be
discovered.
enum:
- Node
- node
- Service
- service
- Pod
- pod
- Endpoints
- endpoints
- EndpointSlice
- endpointslice
- Ingress
- ingress
type: string
selectors:
description: Selector to select objects.
items:
description: K8SSelectorConfig is Kubernetes Selector Config
properties:
field:
type: string
label:
type: string
role:
description: Role is role of the service in Kubernetes.
enum:
- Node
- node
- Service
- service
- Pod
- pod
- Endpoints
- endpoints
- EndpointSlice
- endpointslice
- Ingress
- ingress
type: string
required:
- role
type: object
type: array
x-kubernetes-list-map-keys:
- role
x-kubernetes-list-type: map
tlsConfig:
description: TLS configuration to use on every scrape request.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
required:
- role
type: object
type: array
labelLimit:
description: Per-scrape limit on number of labels that will be accepted
for a sample. Only valid in Prometheus versions 2.27.0 and newer.
format: int64
type: integer
labelNameLengthLimit:
description: Per-scrape limit on length of labels name that will be
accepted for a sample. Only valid in Prometheus versions 2.27.0
and newer.
format: int64
type: integer
labelValueLengthLimit:
description: Per-scrape limit on length of labels value that will
be accepted for a sample. Only valid in Prometheus versions 2.27.0
and newer.
format: int64
type: integer
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: "RelabelConfig allows dynamic rewriting of the label
set for targets, alerts, scraped samples and remote write samples.
\n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus
>= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source label
values. \n Only applicable when the action is `HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace action
is performed if the regular expression matches. \n Regex capture
groups are available."
type: string
separator:
description: Separator is the string between concatenated SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing labels.
Their content is concatenated using the configured Separator
and matched against the configured regular expression.
items:
description: LabelName is a valid Prometheus label name which
may only contain ASCII letters, numbers, as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`, `HashMod`,
`Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions.
\n Regex capture groups are available."
type: string
type: object
type: array
metricsPath:
description: MetricsPath HTTP path to scrape for metrics. If empty,
Prometheus uses the default value (e.g. /metrics).
type: string
noProxy:
description: "`noProxy` is a comma-separated string that can contain
IPs, CIDR notation, domain names that should be excluded from proxying.
IP and domain names can contain port numbers. \n It requires Prometheus
>= v2.43.0."
type: string
openstackSDConfigs:
description: OpenStackSDConfigs defines a list of OpenStack service
discovery configurations.
items:
description: OpenStackSDConfig allows retrieving scrape targets from
OpenStack Nova instances. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config
properties:
allTenants:
description: Whether the service discovery should list all instances
for all projects. It is only relevant for the 'instance' role
and usually requires admin permissions.
type: boolean
applicationCredentialId:
description: ApplicationCredentialID
type: string
applicationCredentialName:
description: The ApplicationCredentialID or ApplicationCredentialName
fields are required if using an application credential to
authenticate. Some providers allow you to create an application
credential to authenticate rather than a password.
type: string
applicationCredentialSecret:
description: The applicationCredentialSecret field is required
if using an application credential to authenticate.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
availability:
description: Availability of the endpoint to connect to.
enum:
- Public
- public
- Admin
- admin
- Internal
- internal
type: string
domainID:
description: DomainID
type: string
domainName:
description: At most one of domainId and domainName must be
provided if using username with Identity V3. Otherwise, either
are optional.
type: string
identityEndpoint:
description: IdentityEndpoint specifies the HTTP endpoint that
is required to work with the Identity API of the appropriate
version.
type: string
password:
description: Password for the Identity V2 and V3 APIs. Consult
with your provider's control panel to discover your account's
preferred method of authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
port:
description: The port to scrape metrics from. If using the public
IP address, this must instead be specified in the relabeling
rule.
type: integer
projectID:
description: ProjectID
type: string
projectName:
description: The ProjectId and ProjectName fields are optional
for the Identity V2 API. Some providers allow you to specify
a ProjectName instead of the ProjectId. Some require both.
Your provider's authentication policies will determine how
these fields influence authentication.
type: string
refreshInterval:
description: Refresh interval to re-read the instance list.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
region:
description: The OpenStack Region.
minLength: 1
type: string
role:
description: The OpenStack role of entities that should be discovered.
enum:
- Instance
- instance
- Hypervisor
- hypervisor
type: string
tlsConfig:
description: TLS configuration applying to the target HTTP endpoint.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
userid:
description: UserID
type: string
username:
description: Username is required if using Identity V2 API.
Consult with your provider's control panel to discover your
account's username. In Identity V3, either userid or a combination
of username and domainId or domainName are needed
type: string
required:
- region
- role
type: object
type: array
params:
additionalProperties:
items:
type: string
type: array
description: Optional HTTP URL parameters
type: object
x-kubernetes-map-type: atomic
proxyConnectHeader:
additionalProperties:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
description: "ProxyConnectHeader optionally specifies headers to send
to proxies during CONNECT requests. \n It requires Prometheus >=
v2.43.0."
type: object
x-kubernetes-map-type: atomic
proxyFromEnvironment:
description: "Whether to use the proxy configuration defined by environment
variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY). If unset, Prometheus
uses its default value. \n It requires Prometheus >= v2.43.0."
type: boolean
proxyUrl:
description: "`proxyURL` defines the HTTP proxy server to use. \n
It requires Prometheus >= v2.43.0."
pattern: ^http(s)?://.+$
type: string
relabelings:
description: 'RelabelConfigs defines how to rewrite the target''s
labels before scraping. Prometheus Operator automatically adds relabelings
for a few standard Kubernetes fields. The original scrape job''s
name is available via the `__tmp_prometheus_job_name` label. More
info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: "RelabelConfig allows dynamic rewriting of the label
set for targets, alerts, scraped samples and remote write samples.
\n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus
>= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source label
values. \n Only applicable when the action is `HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace action
is performed if the regular expression matches. \n Regex capture
groups are available."
type: string
separator:
description: Separator is the string between concatenated SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing labels.
Their content is concatenated using the configured Separator
and matched against the configured regular expression.
items:
description: LabelName is a valid Prometheus label name which
may only contain ASCII letters, numbers, as well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`, `HashMod`,
`Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions.
\n Regex capture groups are available."
type: string
type: object
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted.
format: int64
type: integer
scheme:
description: Configures the protocol scheme used for requests. If
empty, Prometheus uses HTTP by default.
enum:
- HTTP
- HTTPS
type: string
scrapeInterval:
description: ScrapeInterval is the interval between consecutive scrapes.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
scrapeTimeout:
description: ScrapeTimeout is the number of seconds to wait until
a scrape request times out.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
staticConfigs:
description: StaticConfigs defines a list of static targets with a
common label set.
items:
description: StaticConfig defines a Prometheus static configuration.
See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
properties:
labels:
additionalProperties:
type: string
description: Labels assigned to all metrics scraped from the
targets.
type: object
x-kubernetes-map-type: atomic
targets:
description: List of targets for this static configuration.
items:
description: Target represents a target for Prometheus to
scrape
type: string
type: array
type: object
type: array
targetLimit:
description: TargetLimit defines a limit on the number of scraped
targets that will be accepted.
format: int64
type: integer
tlsConfig:
description: TLS configuration to use on every scrape request
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keySecret:
description: Secret containing the client key file for the targets.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
trackTimestampsStaleness:
description: TrackTimestampsStaleness whether Prometheus tracks staleness
of the metrics that have an explicit timestamp present in scraped
data. Has no effect if `honorTimestamps` is false. It requires Prometheus
>= v2.48.0.
type: boolean
type: object
required:
- spec
type: object
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: servicemonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: ServiceMonitor
listKind: ServiceMonitorList
plural: servicemonitors
shortNames:
- smon
singular: servicemonitor
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: ServiceMonitor defines monitoring for a set of services.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired Service selection for target discovery
by Prometheus.
properties:
attachMetadata:
description: "`attachMetadata` defines additional metadata which is
added to the discovered targets. \n It requires Prometheus >= v2.37.0."
properties:
node:
description: When set to true, Prometheus must have the `get`
permission on the `Nodes` objects.
type: boolean
type: object
endpoints:
description: List of endpoints part of this ServiceMonitor.
items:
description: Endpoint defines an endpoint serving Prometheus metrics
to be scraped by Prometheus.
properties:
authorization:
description: "`authorization` configures the Authorization header
credentials to use when scraping the target. \n Cannot be
set at the same time as `basicAuth`, or `oauth2`."
properties:
credentials:
description: Selects a key of a Secret in the namespace
that contains the credentials for authentication.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type:
description: "Defines the authentication type. The value
is case-insensitive. \n \"Basic\" is not a supported value.
\n Default: \"Bearer\""
type: string
type: object
basicAuth:
description: "`basicAuth` configures the Basic Authentication
credentials to use when scraping the target. \n Cannot be
set at the same time as `authorization`, or `oauth2`."
properties:
password:
description: '`password` specifies a key of a Secret containing
the password for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
username:
description: '`username` specifies a key of a Secret containing
the username for authentication.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
bearerTokenFile:
description: "File to read bearer token for scraping the target.
\n Deprecated: use `authorization` instead."
type: string
bearerTokenSecret:
description: "`bearerTokenSecret` specifies a key of a Secret
containing the bearer token for scraping targets. The secret
needs to be in the same namespace as the ServiceMonitor object
and readable by the Prometheus Operator. \n Deprecated: use
`authorization` instead."
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
enableHttp2:
description: '`enableHttp2` can be used to disable HTTP2 when
scraping the target.'
type: boolean
filterRunning:
description: "When true, the pods which are not running (e.g.
either in Failed or Succeeded state) are dropped during the
target discovery. \n If unset, the filtering is enabled. \n
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase"
type: boolean
followRedirects:
description: '`followRedirects` defines whether the scrape requests
should follow HTTP 3xx redirects.'
type: boolean
honorLabels:
description: When true, `honorLabels` preserves the metric's
labels when they collide with the target's labels.
type: boolean
honorTimestamps:
description: '`honorTimestamps` controls whether Prometheus
preserves the timestamps when exposed by the target.'
type: boolean
interval:
description: "Interval at which Prometheus scrapes the metrics
from the target. \n If empty, Prometheus uses the global scrape
interval."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
metricRelabelings:
description: '`metricRelabelings` configures the relabeling
rules to apply to the samples before ingestion.'
items:
description: "RelabelConfig allows dynamic rewriting of the
label set for targets, alerts, scraped samples and remote
write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label name
which may only contain ASCII letters, numbers, as
well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
oauth2:
description: "`oauth2` configures the OAuth2 settings to use
when scraping the target. \n It requires Prometheus >= 2.27.0.
\n Cannot be set at the same time as `authorization`, or `basicAuth`."
properties:
clientId:
description: '`clientId` specifies a key of a Secret or
ConfigMap containing the OAuth2 client''s ID.'
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
clientSecret:
description: '`clientSecret` specifies a key of a Secret
containing the OAuth2 client''s secret.'
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
endpointParams:
additionalProperties:
type: string
description: '`endpointParams` configures the HTTP parameters
to append to the token URL.'
type: object
scopes:
description: '`scopes` defines the OAuth2 scopes used for
the token request.'
items:
type: string
type: array
tokenUrl:
description: '`tokenURL` configures the URL to fetch the
token from.'
minLength: 1
type: string
required:
- clientId
- clientSecret
- tokenUrl
type: object
params:
additionalProperties:
items:
type: string
type: array
description: params define optional HTTP URL parameters.
type: object
path:
description: "HTTP path from which to scrape for metrics. \n
If empty, Prometheus uses the default value (e.g. `/metrics`)."
type: string
port:
description: "Name of the Service port which this endpoint refers
to. \n It takes precedence over `targetPort`."
type: string
proxyUrl:
description: '`proxyURL` configures the HTTP Proxy URL (e.g.
"http://proxyserver:2195") to go through when scraping the
target.'
type: string
relabelings:
description: "`relabelings` configures the relabeling rules
to apply the target's metadata labels. \n The Operator automatically
adds relabelings for a few standard Kubernetes fields. \n
The original scrape job's name is available via the `__tmp_prometheus_job_name`
label. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
items:
description: "RelabelConfig allows dynamic rewriting of the
label set for targets, alerts, scraped samples and remote
write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config"
properties:
action:
default: replace
description: "Action to perform based on the regex matching.
\n `Uppercase` and `Lowercase` actions require Prometheus
>= v2.36.0. `DropEqual` and `KeepEqual` actions require
Prometheus >= v2.41.0. \n Default: \"Replace\""
enum:
- replace
- Replace
- keep
- Keep
- drop
- Drop
- hashmod
- HashMod
- labelmap
- LabelMap
- labeldrop
- LabelDrop
- labelkeep
- LabelKeep
- lowercase
- Lowercase
- uppercase
- Uppercase
- keepequal
- KeepEqual
- dropequal
- DropEqual
type: string
modulus:
description: "Modulus to take of the hash of the source
label values. \n Only applicable when the action is
`HashMod`."
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched.
type: string
replacement:
description: "Replacement value against which a Replace
action is performed if the regular expression matches.
\n Regex capture groups are available."
type: string
separator:
description: Separator is the string between concatenated
SourceLabels.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
Separator and matched against the configured regular
expression.
items:
description: LabelName is a valid Prometheus label name
which may only contain ASCII letters, numbers, as
well as underscores.
pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
type: string
type: array
targetLabel:
description: "Label to which the resulting string is written
in a replacement. \n It is mandatory for `Replace`,
`HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and
`DropEqual` actions. \n Regex capture groups are available."
type: string
type: object
type: array
scheme:
description: "HTTP scheme to use for scraping. \n `http` and
`https` are the expected values unless you rewrite the `__scheme__`
label via relabeling. \n If empty, Prometheus uses the default
value `http`."
enum:
- http
- https
type: string
scrapeTimeout:
description: "Timeout after which Prometheus considers the scrape
to be failed. \n If empty, Prometheus uses the global scrape
timeout unless it is less than the target's scrape interval
value in which the latter is used."
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
targetPort:
anyOf:
- type: integer
- type: string
description: "Name or number of the target port of the `Pod`
object behind the Service, the port must be specified with
container port property. \n Deprecated: use `port` instead."
x-kubernetes-int-or-string: true
tlsConfig:
description: TLS configuration to use when scraping the target.
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the
targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: Secret containing the client key file for the
targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
trackTimestampsStaleness:
description: "`trackTimestampsStaleness` defines whether Prometheus
tracks staleness of the metrics that have an explicit timestamp
present in scraped data. Has no effect if `honorTimestamps`
is false. \n It requires Prometheus >= v2.48.0."
type: boolean
type: object
type: array
jobLabel:
description: "`jobLabel` selects the label from the associated Kubernetes
`Service` object which will be used as the `job` label for all metrics.
\n For example if `jobLabel` is set to `foo` and the Kubernetes
`Service` object is labeled with `foo: bar`, then Prometheus adds
the `job=\"bar\"` label to all ingested metrics. \n If the value
of this field is empty or if the label doesn't exist for the given
Service, the `job` label of the metrics defaults to the name of
the associated Kubernetes `Service`."
type: string
keepDroppedTargets:
description: "Per-scrape limit on the number of targets dropped by
relabeling that will be kept in memory. 0 means no limit. \n It
requires Prometheus >= v2.47.0."
format: int64
type: integer
labelLimit:
description: "Per-scrape limit on number of labels that will be accepted
for a sample. \n It requires Prometheus >= v2.27.0."
format: int64
type: integer
labelNameLengthLimit:
description: "Per-scrape limit on length of labels name that will
be accepted for a sample. \n It requires Prometheus >= v2.27.0."
format: int64
type: integer
labelValueLengthLimit:
description: "Per-scrape limit on length of labels value that will
be accepted for a sample. \n It requires Prometheus >= v2.27.0."
format: int64
type: integer
namespaceSelector:
description: Selector to select which namespaces the Kubernetes `Endpoints`
objects are discovered from.
properties:
any:
description: Boolean describing whether all namespaces are selected
in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names to select from.
items:
type: string
type: array
type: object
podTargetLabels:
description: '`podTargetLabels` defines the labels which are transferred
from the associated Kubernetes `Pod` object onto the ingested metrics.'
items:
type: string
type: array
sampleLimit:
description: '`sampleLimit` defines a per-scrape limit on the number
of scraped samples that will be accepted.'
format: int64
type: integer
selector:
description: Label selector to select the Kubernetes `Endpoints` objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
targetLabels:
description: '`targetLabels` defines the labels which are transferred
from the associated Kubernetes `Service` object onto the ingested
metrics.'
items:
type: string
type: array
targetLimit:
description: '`targetLimit` defines a limit on the number of scraped
targets that will be accepted.'
format: int64
type: integer
required:
- selector
type: object
required:
- spec
type: object
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.71.2
name: thanosrulers.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
kind: ThanosRuler
listKind: ThanosRulerList
plural: thanosrulers
shortNames:
- ruler
singular: thanosruler
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: The version of Thanos Ruler
jsonPath: .spec.version
name: Version
type: string
- description: The number of desired replicas
jsonPath: .spec.replicas
name: Replicas
type: integer
- description: The number of ready replicas
jsonPath: .status.availableReplicas
name: Ready
type: integer
- jsonPath: .status.conditions[?(@.type == 'Reconciled')].status
name: Reconciled
type: string
- jsonPath: .status.conditions[?(@.type == 'Available')].status
name: Available
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- description: Whether the resource reconciliation is paused or not
jsonPath: .status.paused
name: Paused
priority: 1
type: boolean
name: v1
schema:
openAPIV3Schema:
description: ThanosRuler defines a ThanosRuler deployment.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'Specification of the desired behavior of the ThanosRuler
cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
properties:
additionalArgs:
description: AdditionalArgs allows setting additional arguments for
the ThanosRuler container. It is intended for e.g. activating hidden
flags which are not supported by the dedicated configuration options
yet. The arguments are passed as-is to the ThanosRuler container
which may cause issues if they are invalid or not supported by the
given ThanosRuler version. In case of an argument conflict (e.g.
an argument which is already set by the operator itself) or when
providing an invalid argument the reconciliation will fail and an
error will be logged.
items:
description: Argument as part of the AdditionalArgs list.
properties:
name:
description: Name of the argument, e.g. "scrape.discovery-reload-interval".
minLength: 1
type: string
value:
description: Argument value, e.g. 30s. Can be empty for name-only
arguments (e.g. --storage.tsdb.no-lockfile)
type: string
required:
- name
type: object
type: array
affinity:
description: If specified, the pod's scheduling constraints.
properties:
nodeAffinity:
description: Describes node affinity scheduling rules for the
pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the affinity expressions specified by
this field, but it may choose a node that violates one or
more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node matches
the corresponding matchExpressions; the node(s) with the
highest sum are the most preferred.
items:
description: An empty preferred scheduling term matches
all objects with implicit weight 0 (i.e. it's a no-op).
A null preferred scheduling term matches no objects (i.e.
is also a no-op).
properties:
preference:
description: A node selector term, associated with the
corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
weight:
description: Weight associated with matching the corresponding
nodeSelectorTerm, in the range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to an update), the system may or may not try to
eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms.
The terms are ORed.
items:
description: A null or empty node selector term matches
no objects. The requirements of them are ANDed. The
TopologySelectorTerm type implements a subset of the
NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
x-kubernetes-map-type: atomic
type: array
required:
- nodeSelectorTerms
type: object
x-kubernetes-map-type: atomic
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g. co-locate
this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the affinity expressions specified by
this field, but it may choose a node that violates one or
more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node has
pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by this
field and the ones listed in the namespaces field.
null selector and null or empty namespaces list
means "this pod's namespace". An empty selector
({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to. The
term is applied to the union of the namespaces
listed in this field and the ones selected by
namespaceSelector. null or empty namespaces list
and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey
matches that of any node on which any of the selected
pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to a pod label update), the system may or may
not try to eventually evict the pod from its node. When
there are multiple elements, the lists of nodes corresponding
to each podAffinityTerm are intersected, i.e. all terms
must be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s))
that this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
on a node whose value of the label with key
matches that of any node on which a pod of the set of
pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied to the
union of the namespaces selected by this field and
the ones listed in the namespaces field. null selector
and null or empty namespaces list means "this pod's
namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace
names that the term applies to. The term is applied
to the union of the namespaces listed in this field
and the ones selected by namespaceSelector. null or
empty namespaces list and null namespaceSelector means
"this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of
any node on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules (e.g.
avoid putting this pod in the same node, zone, etc. as some
other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to
nodes that satisfy the anti-affinity expressions specified
by this field, but it may choose a node that violates one
or more of the expressions. The node that is most preferred
is the one with the greatest sum of weights, i.e. for each
node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions,
etc.), compute a sum by iterating through the elements of
this field and adding "weight" to the sum if the node has
pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied
to the union of the namespaces selected by this
field and the ones listed in the namespaces field.
null selector and null or empty namespaces list
means "this pod's namespace". An empty selector
({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list
of namespace names that the term applies to. The
term is applied to the union of the namespaces
listed in this field and the ones selected by
namespaceSelector. null or empty namespaces list
and null namespaceSelector means "this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey
matches that of any node on which any of the selected
pods is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified by
this field are not met at scheduling time, the pod will
not be scheduled onto the node. If the anti-affinity requirements
specified by this field cease to be met at some point during
pod execution (e.g. due to a pod label update), the system
may or may not try to eventually evict the pod from its
node. When there are multiple elements, the lists of nodes
corresponding to each podAffinityTerm are intersected, i.e.
all terms must be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s))
that this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
on a node whose value of the label with key
matches that of any node on which a pod of the set of
pods is running
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaceSelector:
description: A label query over the set of namespaces
that the term applies to. The term is applied to the
union of the namespaces selected by this field and
the ones listed in the namespaces field. null selector
and null or empty namespaces list means "this pod's
namespace". An empty selector ({}) matches all namespaces.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
namespaces:
description: namespaces specifies a static list of namespace
names that the term applies to. The term is applied
to the union of the namespaces listed in this field
and the ones selected by namespaceSelector. null or
empty namespaces list and null namespaceSelector means
"this pod's namespace".
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of
any node on which any of the selected pods is running.
Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
alertDropLabels:
description: AlertDropLabels configure the label names which should
be dropped in ThanosRuler alerts. The replica label `thanos_ruler_replica`
will always be dropped in alerts.
items:
type: string
type: array
alertQueryUrl:
description: The external Query URL the Thanos Ruler will set in the
'Source' field of all alerts. Maps to the '--alert.query-url' CLI
arg.
type: string
alertRelabelConfigFile:
description: AlertRelabelConfigFile specifies the path of the alert
relabeling configuration file. When used alongside with AlertRelabelConfigs,
alertRelabelConfigFile takes precedence.
type: string
alertRelabelConfigs:
description: 'AlertRelabelConfigs configures alert relabeling in ThanosRuler.
Alert relabel configurations must have the form as specified in
the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs
Alternative to AlertRelabelConfigFile, and lower order priority.'
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
alertmanagersConfig:
description: Define configuration for connecting to alertmanager. Only
available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config`
arg.
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
alertmanagersUrl:
description: 'Define URLs to send alerts to Alertmanager. For Thanos
v0.10.0 and higher, AlertManagersConfig should be used instead. Note:
this field will be ignored if AlertManagersConfig is specified.
Maps to the `alertmanagers.url` arg.'
items:
type: string
type: array
containers:
description: 'Containers allows injecting additional containers or
modifying operator generated containers. This can be used to allow
adding an authentication proxy to a ThanosRuler pod or to change
the behavior of an operator generated container. Containers described
here modify an operator generated container if they share the same
name and modifications are done via a strategic merge patch. The
current container names are: `thanos-ruler` and `config-reloader`.
Overriding containers is entirely outside the scope of what the
maintainers will support and by doing so, you accept that this behaviour
may break at any time without notice.'
items:
description: A single application container that you want to run
within a pod.
properties:
args:
description: 'Arguments to the entrypoint. The container image''s
CMD is used if this is not provided. Variable references $(VAR_NAME)
are expanded using the container''s environment. If a variable
cannot be resolved, the reference in the input string will
be unchanged. Double $$ are reduced to a single $, which allows
for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references
will never be expanded, regardless of whether the variable
exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
command:
description: 'Entrypoint array. Not executed within a shell.
The container image''s ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container''s
environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax:
i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether
the variable exists or not. Cannot be updated. More info:
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
env:
description: List of environment variables to set in the container.
Cannot be updated.
items:
description: EnvVar represents an environment variable present
in a Container.
properties:
name:
description: Name of the environment variable. Must be
a C_IDENTIFIER.
type: string
value:
description: 'Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in
the container and any service environment variables.
If a variable cannot be resolved, the reference in the
input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME)
syntax: i.e. "$$(VAR_NAME)" will produce the string
literal "$(VAR_NAME)". Escaped references will never
be expanded, regardless of whether the variable exists
or not. Defaults to "".'
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: 'Selects a field of the pod: supports
metadata.name, metadata.namespace, `metadata.labels['''']`,
`metadata.annotations['''']`, spec.nodeName,
spec.serviceAccountName, status.hostIP, status.podIP,
status.podIPs.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, limits.ephemeral-storage, requests.cpu,
requests.memory and requests.ephemeral-storage)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's
namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
envFrom:
description: List of sources to populate environment variables
in the container. The keys defined within a source must be
a C_IDENTIFIER. All invalid keys will be reported as an event
when the container is starting. When a key exists in multiple
sources, the value associated with the last source will take
precedence. Values defined by an Env with a duplicate key
will take precedence. Cannot be updated.
items:
description: EnvFromSource represents the source of a set
of ConfigMaps
properties:
configMapRef:
description: The ConfigMap to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap must be
defined
type: boolean
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each
key in the ConfigMap. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
type: object
type: array
image:
description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management
to default or override container images in workload controllers
like Deployments and StatefulSets.'
type: string
imagePullPolicy:
description: 'Image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent
otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
type: string
lifecycle:
description: Actions that the management system should take
in response to container lifecycle events. Cannot be updated.
properties:
postStart:
description: 'PostStart is called immediately after a container
is created. If the handler fails, the container is terminated
and restarted according to its restart policy. Other management
of the container blocks until the hook completes. More
info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
preStop:
description: 'PreStop is called immediately before a container
is terminated due to an API request or management event
such as liveness/startup probe failure, preemption, resource
contention, etc. The handler is not called if the container
crashes or exits. The Pod''s termination grace period
countdown begins before the PreStop hook is executed.
Regardless of the outcome of the handler, the container
will eventually terminate within the Pod''s termination
grace period (unless delayed by finalizers). Other management
of the container blocks until the hook completes or until
the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
type: object
livenessProbe:
description: 'Periodic probe of container liveness. Container
will be restarted if the probe fails. Cannot be updated. More
info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
name:
description: Name of the container specified as a DNS_LABEL.
Each container in a pod must have a unique name (DNS_LABEL).
Cannot be updated.
type: string
ports:
description: List of ports to expose from the container. Not
specifying a port here DOES NOT prevent that port from being
exposed. Any port which is listening on the default "0.0.0.0"
address inside a container will be accessible from the network.
Modifying this array with strategic merge patch may corrupt
the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255.
Cannot be updated.
items:
description: ContainerPort represents a network port in a
single container.
properties:
containerPort:
description: Number of port to expose on the pod's IP
address. This must be a valid port number, 0 < x < 65536.
format: int32
type: integer
hostIP:
description: What host IP to bind the external port to.
type: string
hostPort:
description: Number of port to expose on the host. If
specified, this must be a valid port number, 0 < x <
65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
format: int32
type: integer
name:
description: If specified, this must be an IANA_SVC_NAME
and unique within the pod. Each named port in a pod
must have a unique name. Name for the port that can
be referred to by services.
type: string
protocol:
default: TCP
description: Protocol for port. Must be UDP, TCP, or SCTP.
Defaults to "TCP".
type: string
required:
- containerPort
type: object
type: array
x-kubernetes-list-map-keys:
- containerPort
- protocol
x-kubernetes-list-type: map
readinessProbe:
description: 'Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe
fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
resizePolicy:
description: Resources resize policy for the container.
items:
description: ContainerResizePolicy represents resource resize
policy for the container.
properties:
resourceName:
description: 'Name of the resource to which this resource
resize policy applies. Supported values: cpu, memory.'
type: string
restartPolicy:
description: Restart policy to apply when specified resource
is resized. If not specified, it defaults to NotRequired.
type: string
required:
- resourceName
- restartPolicy
type: object
type: array
x-kubernetes-list-type: atomic
resources:
description: 'Compute Resources required by this container.
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only
be set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry
in pod.spec.resourceClaims of the Pod where this
field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
restartPolicy:
description: 'RestartPolicy defines the restart behavior of
individual containers in a pod. This field may only be set
for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
the restart behavior is defined by the Pod''s restart policy
and the container type. Setting the RestartPolicy as "Always"
for the init container will have the following effect: this
init container will be continually restarted on exit until
all regular containers have terminated. Once all regular containers
have completed, all init containers with restartPolicy "Always"
will be shut down. This lifecycle differs from normal init
containers and is often referred to as a "sidecar" container.
Although this init container still starts in the init container
sequence, it does not wait for the container to complete before
proceeding to the next init container. Instead, the next init
container starts immediately after this init container is
started, or after any startupProbe has successfully completed.'
type: string
securityContext:
description: 'SecurityContext defines the security options the
container should be run with. If set, the fields of SecurityContext
override the equivalent fields of PodSecurityContext. More
info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
properties:
allowPrivilegeEscalation:
description: 'AllowPrivilegeEscalation controls whether
a process can gain more privileges than its parent process.
This bool directly controls if the no_new_privs flag will
be set on the container process. AllowPrivilegeEscalation
is true always when the container is: 1) run as Privileged
2) has CAP_SYS_ADMIN Note that this field cannot be set
when spec.os.name is windows.'
type: boolean
capabilities:
description: The capabilities to add/drop when running containers.
Defaults to the default set of capabilities granted by
the container runtime. Note that this field cannot be
set when spec.os.name is windows.
properties:
add:
description: Added capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
drop:
description: Removed capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
type: object
privileged:
description: Run container in privileged mode. Processes
in privileged containers are essentially equivalent to
root on the host. Defaults to false. Note that this field
cannot be set when spec.os.name is windows.
type: boolean
procMount:
description: procMount denotes the type of proc mount to
use for the containers. The default is DefaultProcMount
which uses the container runtime defaults for readonly
paths and masked paths. This requires the ProcMountType
feature flag to be enabled. Note that this field cannot
be set when spec.os.name is windows.
type: string
readOnlyRootFilesystem:
description: Whether this container has a read-only root
filesystem. Default is false. Note that this field cannot
be set when spec.os.name is windows.
type: boolean
runAsGroup:
description: The GID to run the entrypoint of the container
process. Uses runtime default if unset. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a
non-root user. If true, the Kubelet will validate the
image at runtime to ensure that it does not run as UID
0 (root) and fail to start the container if it does. If
unset or false, no such validation will be performed.
May also be set in PodSecurityContext. If set in both
SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container
process. Defaults to user specified in image metadata
if unspecified. May also be set in PodSecurityContext. If
set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to the container.
If unspecified, the container runtime will allocate a
random SELinux context for each container. May also be
set in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies
to the container.
type: string
role:
description: Role is a SELinux role label that applies
to the container.
type: string
type:
description: Type is a SELinux type label that applies
to the container.
type: string
user:
description: User is a SELinux user label that applies
to the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by this container.
If seccomp options are provided at both the pod & container
level, the container options override the pod options.
Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile
must be preconfigured on the node to work. Must be
a descending path, relative to the kubelet's configured
seccomp profile location. Must be set if type is "Localhost".
Must NOT be set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost -
a profile defined in a file on the node should be
used. RuntimeDefault - the container runtime default
profile should be used. Unconfined - no profile should
be applied."
type: string
required:
- type
type: object
windowsOptions:
description: The Windows specific settings applied to all
containers. If unspecified, the options from the PodSecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence.
Note that this field cannot be set when spec.os.name is
linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named
by the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the
GMSA credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's
containers must have the same effective HostProcess
value (it is not allowed to have a mix of HostProcess
containers and non-HostProcess containers). In addition,
if HostProcess is true then HostNetwork must also
be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence.
type: string
type: object
type: object
startupProbe:
description: 'StartupProbe indicates that the Pod has successfully
initialized. If specified, no other probes are executed until
this completes successfully. If this probe fails, the Pod
will be restarted, just as if the livenessProbe failed. This
can be used to provide different probe parameters at the beginning
of a Pod''s lifecycle, when it might take a long time to load
data or warm a cache, than during steady-state operation.
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
stdin:
description: Whether this container should allocate a buffer
for stdin in the container runtime. If this is not set, reads
from stdin in the container will always result in EOF. Default
is false.
type: boolean
stdinOnce:
description: Whether the container runtime should close the
stdin channel after it has been opened by a single attach.
When stdin is true the stdin stream will remain open across
multiple attach sessions. If stdinOnce is set to true, stdin
is opened on container start, is empty until the first client
attaches to stdin, and then remains open and accepts data
until the client disconnects, at which time stdin is closed
and remains closed until the container is restarted. If this
flag is false, a container processes that reads from stdin
will never receive an EOF. Default is false
type: boolean
terminationMessagePath:
description: 'Optional: Path at which the file to which the
container''s termination message will be written is mounted
into the container''s filesystem. Message written is intended
to be brief final status, such as an assertion failure message.
Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited
to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
type: string
terminationMessagePolicy:
description: Indicate how the termination message should be
populated. File will use the contents of terminationMessagePath
to populate the container status message on both success and
failure. FallbackToLogsOnError will use the last chunk of
container log output if the termination message file is empty
and the container exited with an error. The log output is
limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
to File. Cannot be updated.
type: string
tty:
description: Whether this container should allocate a TTY for
itself, also requires 'stdin' to be true. Default is false.
type: boolean
volumeDevices:
description: volumeDevices is the list of block devices to be
used by the container.
items:
description: volumeDevice describes a mapping of a raw block
device within a container.
properties:
devicePath:
description: devicePath is the path inside of the container
that the device will be mapped to.
type: string
name:
description: name must match the name of a persistentVolumeClaim
in the pod
type: string
required:
- devicePath
- name
type: object
type: array
volumeMounts:
description: Pod volumes to mount into the container's filesystem.
Cannot be updated.
items:
description: VolumeMount describes a mounting of a Volume
within a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other
way around. When not set, MountPropagationNone is used.
This field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
workingDir:
description: Container's working directory. If not specified,
the container runtime's default will be used, which might
be configured in the container image. Cannot be updated.
type: string
required:
- name
type: object
type: array
enforcedNamespaceLabel:
description: EnforcedNamespaceLabel enforces adding a namespace label
of origin for each alert and metric that is user created. The label
value will always be the namespace of the object that is being created.
type: string
evaluationInterval:
default: 15s
description: Interval between consecutive evaluations.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
excludedFromEnforcement:
description: List of references to PrometheusRule objects to be excluded
from enforcing a namespace label of origin. Applies only if enforcedNamespaceLabel
set to true.
items:
description: ObjectReference references a PodMonitor, ServiceMonitor,
Probe or PrometheusRule object.
properties:
group:
default: monitoring.coreos.com
description: Group of the referent. When not specified, it defaults
to `monitoring.coreos.com`
enum:
- monitoring.coreos.com
type: string
name:
description: Name of the referent. When not set, all resources
in the namespace are matched.
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
minLength: 1
type: string
resource:
description: Resource of the referent.
enum:
- prometheusrules
- servicemonitors
- podmonitors
- probes
- scrapeconfigs
type: string
required:
- namespace
- resource
type: object
type: array
externalPrefix:
description: The external URL the Thanos Ruler instances will be available
under. This is necessary to generate correct URLs. This is necessary
if Thanos Ruler is not served from root of a DNS name.
type: string
grpcServerTlsConfig:
description: 'GRPCServerTLSConfig configures the gRPC server from
which Thanos Querier reads recorded rule data. Note: Currently only
the CAFile, CertFile, and KeyFile fields are supported. Maps to
the ''--grpc-server-tls-*'' CLI args.'
properties:
ca:
description: Certificate authority used when verifying server
certificates.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
caFile:
description: Path to the CA cert in the Prometheus container to
use for the targets.
type: string
cert:
description: Client certificate to present when doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
certFile:
description: Path to the client cert file in the Prometheus container
for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus container
for the targets.
type: string
keySecret:
description: Secret containing the client key file for the targets.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
hostAliases:
description: Pods' hostAliases configuration
items:
description: HostAlias holds the mapping between IP and hostnames
that will be injected as an entry in the pod's hosts file.
properties:
hostnames:
description: Hostnames for the above IP address.
items:
type: string
type: array
ip:
description: IP address of the host file entry.
type: string
required:
- hostnames
- ip
type: object
type: array
x-kubernetes-list-map-keys:
- ip
x-kubernetes-list-type: map
image:
description: Thanos container image URL.
type: string
imagePullPolicy:
description: Image pull policy for the 'thanos', 'init-config-reloader'
and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
for more details.
enum:
- ""
- Always
- Never
- IfNotPresent
type: string
imagePullSecrets:
description: An optional list of references to secrets in the same
namespace to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
items:
description: LocalObjectReference contains enough information to
let you locate the referenced object inside the same namespace.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
type: array
initContainers:
description: 'InitContainers allows adding initContainers to the pod
definition. Those can be used to e.g. fetch secrets for injection
into the ThanosRuler configuration from external sources. Any errors
during the execution of an initContainer will lead to a restart
of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
Using initContainers for any use case other then secret fetching
is entirely outside the scope of what the maintainers will support
and by doing so, you accept that this behaviour may break at any
time without notice.'
items:
description: A single application container that you want to run
within a pod.
properties:
args:
description: 'Arguments to the entrypoint. The container image''s
CMD is used if this is not provided. Variable references $(VAR_NAME)
are expanded using the container''s environment. If a variable
cannot be resolved, the reference in the input string will
be unchanged. Double $$ are reduced to a single $, which allows
for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references
will never be expanded, regardless of whether the variable
exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
command:
description: 'Entrypoint array. Not executed within a shell.
The container image''s ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container''s
environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax:
i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
Escaped references will never be expanded, regardless of whether
the variable exists or not. Cannot be updated. More info:
https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
items:
type: string
type: array
env:
description: List of environment variables to set in the container.
Cannot be updated.
items:
description: EnvVar represents an environment variable present
in a Container.
properties:
name:
description: Name of the environment variable. Must be
a C_IDENTIFIER.
type: string
value:
description: 'Variable references $(VAR_NAME) are expanded
using the previously defined environment variables in
the container and any service environment variables.
If a variable cannot be resolved, the reference in the
input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME)
syntax: i.e. "$$(VAR_NAME)" will produce the string
literal "$(VAR_NAME)". Escaped references will never
be expanded, regardless of whether the variable exists
or not. Defaults to "".'
type: string
valueFrom:
description: Source for the environment variable's value.
Cannot be used if value is not empty.
properties:
configMapKeyRef:
description: Selects a key of a ConfigMap.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap or
its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
fieldRef:
description: 'Selects a field of the pod: supports
metadata.name, metadata.namespace, `metadata.labels['''']`,
`metadata.annotations['''']`, spec.nodeName,
spec.serviceAccountName, status.hostIP, status.podIP,
status.podIPs.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, limits.ephemeral-storage, requests.cpu,
requests.memory and requests.ephemeral-storage)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
secretKeyRef:
description: Selects a key of a secret in the pod's
namespace
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret or its
key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
required:
- name
type: object
type: array
envFrom:
description: List of sources to populate environment variables
in the container. The keys defined within a source must be
a C_IDENTIFIER. All invalid keys will be reported as an event
when the container is starting. When a key exists in multiple
sources, the value associated with the last source will take
precedence. Values defined by an Env with a duplicate key
will take precedence. Cannot be updated.
items:
description: EnvFromSource represents the source of a set
of ConfigMaps
properties:
configMapRef:
description: The ConfigMap to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the ConfigMap must be
defined
type: boolean
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each
key in the ConfigMap. Must be a C_IDENTIFIER.
type: string
secretRef:
description: The Secret to select from
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: Specify whether the Secret must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
type: object
type: array
image:
description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management
to default or override container images in workload controllers
like Deployments and StatefulSets.'
type: string
imagePullPolicy:
description: 'Image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent
otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
type: string
lifecycle:
description: Actions that the management system should take
in response to container lifecycle events. Cannot be updated.
properties:
postStart:
description: 'PostStart is called immediately after a container
is created. If the handler fails, the container is terminated
and restarted according to its restart policy. Other management
of the container blocks until the hook completes. More
info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
preStop:
description: 'PreStop is called immediately before a container
is terminated due to an API request or management event
such as liveness/startup probe failure, preemption, resource
contention, etc. The handler is not called if the container
crashes or exits. The Pod''s termination grace period
countdown begins before the PreStop hook is executed.
Regardless of the outcome of the handler, the container
will eventually terminate within the Pod''s termination
grace period (unless delayed by finalizers). Other management
of the container blocks until the hook completes or until
the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for
the command is root ('/') in the container's
filesystem. The command is simply exec'd, it is
not run inside a shell, so traditional shell instructions
('|', etc) won't work. To use a shell, you need
to explicitly call out to that shell. Exit status
of 0 is treated as live/healthy and non-zero is
unhealthy.
items:
type: string
type: array
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to
the pod IP. You probably want to set "Host" in
httpHeaders instead.
type: string
httpHeaders:
description: Custom headers to set in the request.
HTTP allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the
host. Defaults to HTTP.
type: string
required:
- port
type: object
tcpSocket:
description: Deprecated. TCPSocket is NOT supported
as a LifecycleHandler and kept for the backward compatibility.
There are no validation of this field and lifecycle
hooks will fail in runtime when tcp handler is specified.
properties:
host:
description: 'Optional: Host name to connect to,
defaults to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access
on the container. Number must be in the range
1 to 65535. Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
type: object
type: object
livenessProbe:
description: 'Periodic probe of container liveness. Container
will be restarted if the probe fails. Cannot be updated. More
info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
name:
description: Name of the container specified as a DNS_LABEL.
Each container in a pod must have a unique name (DNS_LABEL).
Cannot be updated.
type: string
ports:
description: List of ports to expose from the container. Not
specifying a port here DOES NOT prevent that port from being
exposed. Any port which is listening on the default "0.0.0.0"
address inside a container will be accessible from the network.
Modifying this array with strategic merge patch may corrupt
the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255.
Cannot be updated.
items:
description: ContainerPort represents a network port in a
single container.
properties:
containerPort:
description: Number of port to expose on the pod's IP
address. This must be a valid port number, 0 < x < 65536.
format: int32
type: integer
hostIP:
description: What host IP to bind the external port to.
type: string
hostPort:
description: Number of port to expose on the host. If
specified, this must be a valid port number, 0 < x <
65536. If HostNetwork is specified, this must match
ContainerPort. Most containers do not need this.
format: int32
type: integer
name:
description: If specified, this must be an IANA_SVC_NAME
and unique within the pod. Each named port in a pod
must have a unique name. Name for the port that can
be referred to by services.
type: string
protocol:
default: TCP
description: Protocol for port. Must be UDP, TCP, or SCTP.
Defaults to "TCP".
type: string
required:
- containerPort
type: object
type: array
x-kubernetes-list-map-keys:
- containerPort
- protocol
x-kubernetes-list-type: map
readinessProbe:
description: 'Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe
fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
resizePolicy:
description: Resources resize policy for the container.
items:
description: ContainerResizePolicy represents resource resize
policy for the container.
properties:
resourceName:
description: 'Name of the resource to which this resource
resize policy applies. Supported values: cpu, memory.'
type: string
restartPolicy:
description: Restart policy to apply when specified resource
is resized. If not specified, it defaults to NotRequired.
type: string
required:
- resourceName
- restartPolicy
type: object
type: array
x-kubernetes-list-type: atomic
resources:
description: 'Compute Resources required by this container.
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
properties:
claims:
description: "Claims lists the names of resources, defined
in spec.resourceClaims, that are used by this container.
\n This is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only
be set for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry
in pod.spec.resourceClaims of the Pod where this
field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute
resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
restartPolicy:
description: 'RestartPolicy defines the restart behavior of
individual containers in a pod. This field may only be set
for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
the restart behavior is defined by the Pod''s restart policy
and the container type. Setting the RestartPolicy as "Always"
for the init container will have the following effect: this
init container will be continually restarted on exit until
all regular containers have terminated. Once all regular containers
have completed, all init containers with restartPolicy "Always"
will be shut down. This lifecycle differs from normal init
containers and is often referred to as a "sidecar" container.
Although this init container still starts in the init container
sequence, it does not wait for the container to complete before
proceeding to the next init container. Instead, the next init
container starts immediately after this init container is
started, or after any startupProbe has successfully completed.'
type: string
securityContext:
description: 'SecurityContext defines the security options the
container should be run with. If set, the fields of SecurityContext
override the equivalent fields of PodSecurityContext. More
info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
properties:
allowPrivilegeEscalation:
description: 'AllowPrivilegeEscalation controls whether
a process can gain more privileges than its parent process.
This bool directly controls if the no_new_privs flag will
be set on the container process. AllowPrivilegeEscalation
is true always when the container is: 1) run as Privileged
2) has CAP_SYS_ADMIN Note that this field cannot be set
when spec.os.name is windows.'
type: boolean
capabilities:
description: The capabilities to add/drop when running containers.
Defaults to the default set of capabilities granted by
the container runtime. Note that this field cannot be
set when spec.os.name is windows.
properties:
add:
description: Added capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
drop:
description: Removed capabilities
items:
description: Capability represent POSIX capabilities
type
type: string
type: array
type: object
privileged:
description: Run container in privileged mode. Processes
in privileged containers are essentially equivalent to
root on the host. Defaults to false. Note that this field
cannot be set when spec.os.name is windows.
type: boolean
procMount:
description: procMount denotes the type of proc mount to
use for the containers. The default is DefaultProcMount
which uses the container runtime defaults for readonly
paths and masked paths. This requires the ProcMountType
feature flag to be enabled. Note that this field cannot
be set when spec.os.name is windows.
type: string
readOnlyRootFilesystem:
description: Whether this container has a read-only root
filesystem. Default is false. Note that this field cannot
be set when spec.os.name is windows.
type: boolean
runAsGroup:
description: The GID to run the entrypoint of the container
process. Uses runtime default if unset. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a
non-root user. If true, the Kubelet will validate the
image at runtime to ensure that it does not run as UID
0 (root) and fail to start the container if it does. If
unset or false, no such validation will be performed.
May also be set in PodSecurityContext. If set in both
SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container
process. Defaults to user specified in image metadata
if unspecified. May also be set in PodSecurityContext. If
set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to the container.
If unspecified, the container runtime will allocate a
random SELinux context for each container. May also be
set in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence. Note that this field cannot be set when
spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies
to the container.
type: string
role:
description: Role is a SELinux role label that applies
to the container.
type: string
type:
description: Type is a SELinux type label that applies
to the container.
type: string
user:
description: User is a SELinux user label that applies
to the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by this container.
If seccomp options are provided at both the pod & container
level, the container options override the pod options.
Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile
must be preconfigured on the node to work. Must be
a descending path, relative to the kubelet's configured
seccomp profile location. Must be set if type is "Localhost".
Must NOT be set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost -
a profile defined in a file on the node should be
used. RuntimeDefault - the container runtime default
profile should be used. Unconfined - no profile should
be applied."
type: string
required:
- type
type: object
windowsOptions:
description: The Windows specific settings applied to all
containers. If unspecified, the options from the PodSecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence.
Note that this field cannot be set when spec.os.name is
linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named
by the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the
GMSA credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's
containers must have the same effective HostProcess
value (it is not allowed to have a mix of HostProcess
containers and non-HostProcess containers). In addition,
if HostProcess is true then HostNetwork must also
be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set
in PodSecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence.
type: string
type: object
type: object
startupProbe:
description: 'StartupProbe indicates that the Pod has successfully
initialized. If specified, no other probes are executed until
this completes successfully. If this probe fails, the Pod
will be restarted, just as if the livenessProbe failed. This
can be used to provide different probe parameters at the beginning
of a Pod''s lifecycle, when it might take a long time to load
data or warm a cache, than during steady-state operation.
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
properties:
exec:
description: Exec specifies the action to take.
properties:
command:
description: Command is the command line to execute
inside the container, the working directory for the
command is root ('/') in the container's filesystem.
The command is simply exec'd, it is not run inside
a shell, so traditional shell instructions ('|', etc)
won't work. To use a shell, you need to explicitly
call out to that shell. Exit status of 0 is treated
as live/healthy and non-zero is unhealthy.
items:
type: string
type: array
type: object
failureThreshold:
description: Minimum consecutive failures for the probe
to be considered failed after having succeeded. Defaults
to 3. Minimum value is 1.
format: int32
type: integer
grpc:
description: GRPC specifies an action involving a GRPC port.
properties:
port:
description: Port number of the gRPC service. Number
must be in the range 1 to 65535.
format: int32
type: integer
service:
description: "Service is the name of the service to
place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
\n If this is not specified, the default behavior
is defined by gRPC."
type: string
required:
- port
type: object
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
host:
description: Host name to connect to, defaults to the
pod IP. You probably want to set "Host" in httpHeaders
instead.
type: string
httpHeaders:
description: Custom headers to set in the request. HTTP
allows repeated headers.
items:
description: HTTPHeader describes a custom header
to be used in HTTP probes
properties:
name:
description: The header field name. This will
be canonicalized upon output, so case-variant
names will be understood as the same header.
type: string
value:
description: The header field value
type: string
required:
- name
- value
type: object
type: array
path:
description: Path to access on the HTTP server.
type: string
port:
anyOf:
- type: integer
- type: string
description: Name or number of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
scheme:
description: Scheme to use for connecting to the host.
Defaults to HTTP.
type: string
required:
- port
type: object
initialDelaySeconds:
description: 'Number of seconds after the container has
started before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
periodSeconds:
description: How often (in seconds) to perform the probe.
Default to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe
to be considered successful after having failed. Defaults
to 1. Must be 1 for liveness and startup. Minimum value
is 1.
format: int32
type: integer
tcpSocket:
description: TCPSocket specifies an action involving a TCP
port.
properties:
host:
description: 'Optional: Host name to connect to, defaults
to the pod IP.'
type: string
port:
anyOf:
- type: integer
- type: string
description: Number or name of the port to access on
the container. Number must be in the range 1 to 65535.
Name must be an IANA_SVC_NAME.
x-kubernetes-int-or-string: true
required:
- port
type: object
terminationGracePeriodSeconds:
description: Optional duration in seconds the pod needs
to terminate gracefully upon probe failure. The grace
period is the duration in seconds after the processes
running in the pod are sent a termination signal and the
time when the processes are forcibly halted with a kill
signal. Set this value longer than the expected cleanup
time for your process. If this value is nil, the pod's
terminationGracePeriodSeconds will be used. Otherwise,
this value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates
stop immediately via the kill signal (no opportunity to
shut down). This is a beta field and requires enabling
ProbeTerminationGracePeriod feature gate. Minimum value
is 1. spec.terminationGracePeriodSeconds is used if unset.
format: int64
type: integer
timeoutSeconds:
description: 'Number of seconds after which the probe times
out. Defaults to 1 second. Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
stdin:
description: Whether this container should allocate a buffer
for stdin in the container runtime. If this is not set, reads
from stdin in the container will always result in EOF. Default
is false.
type: boolean
stdinOnce:
description: Whether the container runtime should close the
stdin channel after it has been opened by a single attach.
When stdin is true the stdin stream will remain open across
multiple attach sessions. If stdinOnce is set to true, stdin
is opened on container start, is empty until the first client
attaches to stdin, and then remains open and accepts data
until the client disconnects, at which time stdin is closed
and remains closed until the container is restarted. If this
flag is false, a container processes that reads from stdin
will never receive an EOF. Default is false
type: boolean
terminationMessagePath:
description: 'Optional: Path at which the file to which the
container''s termination message will be written is mounted
into the container''s filesystem. Message written is intended
to be brief final status, such as an assertion failure message.
Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited
to 12kb. Defaults to /dev/termination-log. Cannot be updated.'
type: string
terminationMessagePolicy:
description: Indicate how the termination message should be
populated. File will use the contents of terminationMessagePath
to populate the container status message on both success and
failure. FallbackToLogsOnError will use the last chunk of
container log output if the termination message file is empty
and the container exited with an error. The log output is
limited to 2048 bytes or 80 lines, whichever is smaller. Defaults
to File. Cannot be updated.
type: string
tty:
description: Whether this container should allocate a TTY for
itself, also requires 'stdin' to be true. Default is false.
type: boolean
volumeDevices:
description: volumeDevices is the list of block devices to be
used by the container.
items:
description: volumeDevice describes a mapping of a raw block
device within a container.
properties:
devicePath:
description: devicePath is the path inside of the container
that the device will be mapped to.
type: string
name:
description: name must match the name of a persistentVolumeClaim
in the pod
type: string
required:
- devicePath
- name
type: object
type: array
volumeMounts:
description: Pod volumes to mount into the container's filesystem.
Cannot be updated.
items:
description: VolumeMount describes a mounting of a Volume
within a container.
properties:
mountPath:
description: Path within the container at which the volume
should be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are
propagated from the host to container and the other
way around. When not set, MountPropagationNone is used.
This field is beta in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which
the container's volume should be mounted. Behaves similarly
to SubPath but environment variable references $(VAR_NAME)
are expanded using the container's environment. Defaults
to "" (volume's root). SubPathExpr and SubPath are mutually
exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
workingDir:
description: Container's working directory. If not specified,
the container runtime's default will be used, which might
be configured in the container image. Cannot be updated.
type: string
required:
- name
type: object
type: array
labels:
additionalProperties:
type: string
description: Labels configure the external label pairs to ThanosRuler.
A default replica label `thanos_ruler_replica` will be always added as
a label with the value of the pod's name and it will be dropped
in the alerts.
type: object
listenLocal:
description: ListenLocal makes the Thanos ruler listen on loopback,
so that it does not bind against the Pod IP.
type: boolean
logFormat:
description: Log format for ThanosRuler to be configured with.
enum:
- ""
- logfmt
- json
type: string
logLevel:
description: Log level for ThanosRuler to be configured with.
enum:
- ""
- debug
- info
- warn
- error
type: string
minReadySeconds:
description: Minimum number of seconds for which a newly created pod
should be ready without any of its container crashing for it to
be considered available. Defaults to 0 (pod will be considered available
as soon as it is ready) This is an alpha field from kubernetes 1.22
until 1.24 which requires enabling the StatefulSetMinReadySeconds
feature gate.
format: int32
type: integer
nodeSelector:
additionalProperties:
type: string
description: Define which Nodes the Pods are scheduled on.
type: object
objectStorageConfig:
description: ObjectStorageConfig configures object storage in Thanos.
Alternative to ObjectStorageConfigFile, and lower order priority.
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
objectStorageConfigFile:
description: ObjectStorageConfigFile specifies the path of the object
storage configuration file. When used alongside with ObjectStorageConfig,
ObjectStorageConfigFile takes precedence.
type: string
paused:
description: When a ThanosRuler deployment is paused, no actions except
for deletion will be performed on the underlying objects.
type: boolean
podMetadata:
description: "PodMetadata configures labels and annotations which
are propagated to the ThanosRuler pods. \n The following items are
reserved and cannot be overridden: * \"app.kubernetes.io/name\"
label, set to \"thanos-ruler\". * \"app.kubernetes.io/managed-by\"
label, set to \"prometheus-operator\". * \"app.kubernetes.io/instance\"
label, set to the name of the ThanosRuler instance. * \"thanos-ruler\"
label, set to the name of the ThanosRuler instance. * \"kubectl.kubernetes.io/default-container\"
annotation, set to \"thanos-ruler\"."
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value map stored
with a resource that may be set by external tools to store and
retrieve arbitrary metadata. They are not queryable and should
be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be used to
organize and categorize (scope and select) objects. May match
selectors of replication controllers and services. More info:
http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace. Is required
when creating resources, although some resources may allow a
client to request the generation of an appropriate name automatically.
Name is primarily intended for creation idempotence and configuration
definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
type: object
portName:
default: web
description: Port name used for the pods and governing service. Defaults
to `web`.
type: string
priorityClassName:
description: Priority class assigned to the Pods
type: string
prometheusRulesExcludedFromEnforce:
description: 'PrometheusRulesExcludedFromEnforce - list of Prometheus
rules to be excluded from enforcing of adding namespace labels.
Works only if enforcedNamespaceLabel set to true. Make sure both
ruleNamespace and ruleName are set for each pair Deprecated: use
excludedFromEnforcement instead.'
items:
description: PrometheusRuleExcludeConfig enables users to configure
excluded PrometheusRule names and their namespaces to be ignored
while enforcing namespace label for alerts and metrics.
properties:
ruleName:
description: Name of the excluded PrometheusRule object.
type: string
ruleNamespace:
description: Namespace of the excluded PrometheusRule object.
type: string
required:
- ruleName
- ruleNamespace
type: object
type: array
queryConfig:
description: Define configuration for connecting to thanos query instances.
If this is defined, the QueryEndpoints field will be ignored. Maps
to the `query.config` CLI argument. Only available with thanos v0.11.0
and higher.
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
queryEndpoints:
description: QueryEndpoints defines Thanos querier endpoints from
which to query metrics. Maps to the --query flag of thanos ruler.
items:
type: string
type: array
replicas:
description: Number of thanos ruler instances to deploy.
format: int32
type: integer
resources:
description: Resources defines the resource requirements for single
Pods. If not provided, no requests/limits will be set
properties:
claims:
description: "Claims lists the names of resources, defined in
spec.resourceClaims, that are used by this container. \n This
is an alpha field and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It can only be set
for containers."
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one entry in pod.spec.resourceClaims
of the Pod where this field is used. It makes that resource
available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount of compute resources
allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount of compute
resources required. If Requests is omitted for a container,
it defaults to Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests cannot exceed Limits.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
retention:
default: 24h
description: Time duration ThanosRuler shall retain data for. Default
is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)`
(milliseconds seconds minutes hours days weeks years).
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
routePrefix:
description: The route prefix ThanosRuler registers HTTP handlers
for. This allows thanos UI to be served on a sub-path.
type: string
ruleNamespaceSelector:
description: Namespaces to be selected for Rules discovery. If unspecified,
only the same namespace as the ThanosRuler object is in is used.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
ruleSelector:
description: A label selector to select which PrometheusRules to mount
for alerting and recording.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
securityContext:
description: SecurityContext holds pod-level security attributes and
common container settings. This defaults to the default PodSecurityContext.
properties:
fsGroup:
description: "A special supplemental group that applies to all
containers in a pod. Some volume types allow the Kubelet to
change the ownership of that volume to be owned by the pod:
\n 1. The owning GID will be the FSGroup 2. The setgid bit is
set (new files created in the volume will be owned by FSGroup)
3. The permission bits are OR'd with rw-rw---- \n If unset,
the Kubelet will not modify the ownership and permissions of
any volume. Note that this field cannot be set when spec.os.name
is windows."
format: int64
type: integer
fsGroupChangePolicy:
description: 'fsGroupChangePolicy defines behavior of changing
ownership and permission of the volume before being exposed
inside Pod. This field will only apply to volume types which
support fsGroup based ownership(and permissions). It will have
no effect on ephemeral volume types such as: secret, configmaps
and emptydir. Valid values are "OnRootMismatch" and "Always".
If not specified, "Always" is used. Note that this field cannot
be set when spec.os.name is windows.'
type: string
runAsGroup:
description: The GID to run the entrypoint of the container process.
Uses runtime default if unset. May also be set in SecurityContext. If
set in both SecurityContext and PodSecurityContext, the value
specified in SecurityContext takes precedence for that container.
Note that this field cannot be set when spec.os.name is windows.
format: int64
type: integer
runAsNonRoot:
description: Indicates that the container must run as a non-root
user. If true, the Kubelet will validate the image at runtime
to ensure that it does not run as UID 0 (root) and fail to start
the container if it does. If unset or false, no such validation
will be performed. May also be set in SecurityContext. If set
in both SecurityContext and PodSecurityContext, the value specified
in SecurityContext takes precedence.
type: boolean
runAsUser:
description: The UID to run the entrypoint of the container process.
Defaults to user specified in image metadata if unspecified.
May also be set in SecurityContext. If set in both SecurityContext
and PodSecurityContext, the value specified in SecurityContext
takes precedence for that container. Note that this field cannot
be set when spec.os.name is windows.
format: int64
type: integer
seLinuxOptions:
description: The SELinux context to be applied to all containers.
If unspecified, the container runtime will allocate a random
SELinux context for each container. May also be set in SecurityContext. If
set in both SecurityContext and PodSecurityContext, the value
specified in SecurityContext takes precedence for that container.
Note that this field cannot be set when spec.os.name is windows.
properties:
level:
description: Level is SELinux level label that applies to
the container.
type: string
role:
description: Role is a SELinux role label that applies to
the container.
type: string
type:
description: Type is a SELinux type label that applies to
the container.
type: string
user:
description: User is a SELinux user label that applies to
the container.
type: string
type: object
seccompProfile:
description: The seccomp options to use by the containers in this
pod. Note that this field cannot be set when spec.os.name is
windows.
properties:
localhostProfile:
description: localhostProfile indicates a profile defined
in a file on the node should be used. The profile must be
preconfigured on the node to work. Must be a descending
path, relative to the kubelet's configured seccomp profile
location. Must be set if type is "Localhost". Must NOT be
set for any other type.
type: string
type:
description: "type indicates which kind of seccomp profile
will be applied. Valid options are: \n Localhost - a profile
defined in a file on the node should be used. RuntimeDefault
- the container runtime default profile should be used.
Unconfined - no profile should be applied."
type: string
required:
- type
type: object
supplementalGroups:
description: A list of groups applied to the first process run
in each container, in addition to the container's primary GID,
the fsGroup (if specified), and group memberships defined in
the container image for the uid of the container process. If
unspecified, no additional groups are added to any container.
Note that group memberships defined in the container image for
the uid of the container process are still effective, even if
they are not included in this list. Note that this field cannot
be set when spec.os.name is windows.
items:
format: int64
type: integer
type: array
sysctls:
description: Sysctls hold a list of namespaced sysctls used for
the pod. Pods with unsupported sysctls (by the container runtime)
might fail to launch. Note that this field cannot be set when
spec.os.name is windows.
items:
description: Sysctl defines a kernel parameter to be set
properties:
name:
description: Name of a property to set
type: string
value:
description: Value of a property to set
type: string
required:
- name
- value
type: object
type: array
windowsOptions:
description: The Windows specific settings applied to all containers.
If unspecified, the options within a container's SecurityContext
will be used. If set in both SecurityContext and PodSecurityContext,
the value specified in SecurityContext takes precedence. Note
that this field cannot be set when spec.os.name is linux.
properties:
gmsaCredentialSpec:
description: GMSACredentialSpec is where the GMSA admission
webhook (https://github.com/kubernetes-sigs/windows-gmsa)
inlines the contents of the GMSA credential spec named by
the GMSACredentialSpecName field.
type: string
gmsaCredentialSpecName:
description: GMSACredentialSpecName is the name of the GMSA
credential spec to use.
type: string
hostProcess:
description: HostProcess determines if a container should
be run as a 'Host Process' container. All of a Pod's containers
must have the same effective HostProcess value (it is not
allowed to have a mix of HostProcess containers and non-HostProcess
containers). In addition, if HostProcess is true then HostNetwork
must also be set to true.
type: boolean
runAsUserName:
description: The UserName in Windows to run the entrypoint
of the container process. Defaults to the user specified
in image metadata if unspecified. May also be set in PodSecurityContext.
If set in both SecurityContext and PodSecurityContext, the
value specified in SecurityContext takes precedence.
type: string
type: object
type: object
serviceAccountName:
description: ServiceAccountName is the name of the ServiceAccount
to use to run the Thanos Ruler Pods.
type: string
storage:
description: Storage spec to specify how storage shall be used.
properties:
disableMountSubPath:
description: 'Deprecated: subPath usage will be removed in a future
release.'
type: boolean
emptyDir:
description: 'EmptyDirVolumeSource to be used by the StatefulSet.
If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`.
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir'
properties:
medium:
description: 'medium represents what type of storage medium
should back this directory. The default is "" which means
to use the node''s default medium. Must be an empty string
(default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
type: string
sizeLimit:
anyOf:
- type: integer
- type: string
description: 'sizeLimit is the total amount of local storage
required for this EmptyDir volume. The size limit is also
applicable for memory medium. The maximum usage on memory
medium EmptyDir would be the minimum value between the SizeLimit
specified here and the sum of memory limits of all containers
in a pod. The default is nil which means that the limit
is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
ephemeral:
description: 'EphemeralVolumeSource to be used by the StatefulSet.
This is a beta field in k8s 1.21 and GA in k8s 1.23. For lower versions,
starting with k8s 1.19, it requires enabling the GenericEphemeralVolume
feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes'
properties:
volumeClaimTemplate:
description: "Will be used to create a stand-alone PVC to
provision the volume. The pod in which this EphemeralVolumeSource
is embedded will be the owner of the PVC, i.e. the PVC will
be deleted together with the pod. The name of the PVC will
be `<pod name>-<volume name>` where `<volume name>` is the
name from the `PodSpec.Volumes` array entry. Pod validation
will reject the pod if the concatenated name is not valid
for a PVC (for example, too long). \n An existing PVC with
that name that is not owned by the pod will *not* be used
for the pod to avoid using an unrelated volume by mistake.
Starting the pod is then blocked until the unrelated PVC
is removed. If such a pre-created PVC is meant to be used
by the pod, the PVC has to be updated with an owner reference
to the pod once the pod exists. Normally this should not
be necessary, but it may be useful when manually reconstructing
a broken cluster. \n This field is read-only and no changes
will be made by Kubernetes to the PVC after it has been
created. \n Required, must not be nil."
properties:
metadata:
description: May contain labels and annotations that will
be copied into the PVC when creating it. No other fields
are allowed and will be rejected during validation.
type: object
spec:
description: The specification for the PersistentVolumeClaim.
The entire content is copied unchanged into the PVC
that gets created from this template. The same fields
as in a PersistentVolumeClaim are also valid here.
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the
provisioner or an external controller can support
the specified data source, it will create a new
volume based on the contents of the specified data
source. When the AnyVolumeDataSource feature gate
is enabled, dataSource contents will be copied to
dataSourceRef, and dataSourceRef contents will be
copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified,
then dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is
required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object from
which to populate the volume with data, if a non-empty
volume is desired. This may be any object from a
non-empty API group (non core object) or a PersistentVolumeClaim
object. When this field is specified, volume binding
will only succeed if the type of the specified object
matches some installed volume populator or dynamic
provisioner. This field will replace the functionality
of the dataSource field and as such if both fields
are non-empty, they must have the same value. For
backwards compatibility, when namespace isn''t specified
in dataSourceRef, both fields (dataSource and dataSourceRef)
will be set to the same value automatically if one
of them is empty and the other is non-empty. When
namespace is specified in dataSourceRef, dataSource
isn''t set to the same value and must be empty.
There are three important differences between dataSource
and dataSourceRef: * While dataSource only allows
two specific types of objects, dataSourceRef allows
any non-core object, as well as PersistentVolumeClaim
objects. * While dataSource ignores disallowed values
(dropping them), dataSourceRef preserves all values,
and generates an error if a disallowed value is
specified. * While dataSource only allows local
objects, dataSourceRef allows objects in any namespaces.
(Beta) Using this field requires the AnyVolumeDataSource
feature gate to be enabled. (Alpha) Using the namespace
field of dataSourceRef requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is
required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace
is specified, a gateway.networking.k8s.io/ReferenceGrant
object is required in the referent namespace
to allow that namespace's owner to accept the
reference. See the ReferenceGrant documentation
for details. (Alpha) This field requires the
CrossNamespaceVolumeDataSource feature gate
to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify
resource requirements that are lower than previous
value but must still be higher than capacity recorded
in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used
by this container. \n This is an alpha field
and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable. It
can only be set for containers."
items:
description: ResourceClaim references one entry
in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of
one entry in pod.spec.resourceClaims of
the Pod where this field is used. It makes
that resource available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes
to consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement is
a selector that contains values, a key, and
an operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If
the operator is Exists or DoesNotExist,
the values array must be empty. This array
is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the
StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume
is required by the claim. Value of Filesystem is
implied when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference to
the PersistentVolume backing this claim.
type: string
type: object
required:
- spec
type: object
type: object
volumeClaimTemplate:
description: Defines the PVC spec to be used by the Prometheus
StatefulSets. The easiest way to use a volume that cannot be
automatically provisioned is to use a label selector alongside
manually created PersistentVolumes.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
representation of an object. Servers should convert recognized
schemas to the latest internal value, and may reject unrecognized
values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST
resource this object represents. Servers may infer this
from the endpoint the client submits requests to. Cannot
be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
description: EmbeddedMetadata contains metadata relevant to
an EmbeddedResource.
properties:
annotations:
additionalProperties:
type: string
description: 'Annotations is an unstructured key value
map stored with a resource that may be set by external
tools to store and retrieve arbitrary metadata. They
are not queryable and should be preserved when modifying
objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
labels:
additionalProperties:
type: string
description: 'Map of string keys and values that can be
used to organize and categorize (scope and select) objects.
May match selectors of replication controllers and services.
More info: http://kubernetes.io/docs/user-guide/labels'
type: object
name:
description: 'Name must be unique within a namespace.
Is required when creating resources, although some resources
may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation
idempotence and configuration definition. Cannot be
updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
type: object
spec:
description: 'Defines the desired characteristics of a volume
requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the provisioner
or an external controller can support the specified
data source, it will create a new volume based on the
contents of the specified data source. When the AnyVolumeDataSource
feature gate is enabled, dataSource contents will be
copied to dataSourceRef, and dataSourceRef contents
will be copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified, then
dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object from
which to populate the volume with data, if a non-empty
volume is desired. This may be any object from a non-empty
API group (non core object) or a PersistentVolumeClaim
object. When this field is specified, volume binding
will only succeed if the type of the specified object
matches some installed volume populator or dynamic provisioner.
This field will replace the functionality of the dataSource
field and as such if both fields are non-empty, they
must have the same value. For backwards compatibility,
when namespace isn''t specified in dataSourceRef, both
fields (dataSource and dataSourceRef) will be set to
the same value automatically if one of them is empty
and the other is non-empty. When namespace is specified
in dataSourceRef, dataSource isn''t set to the same
value and must be empty. There are three important differences
between dataSource and dataSourceRef: * While dataSource
only allows two specific types of objects, dataSourceRef
allows any non-core object, as well as PersistentVolumeClaim
objects. * While dataSource ignores disallowed values
(dropping them), dataSourceRef preserves all values,
and generates an error if a disallowed value is specified.
* While dataSource only allows local objects, dataSourceRef
allows objects in any namespaces. (Beta) Using this
field requires the AnyVolumeDataSource feature gate
to be enabled. (Alpha) Using the namespace field of
dataSourceRef requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace is specified,
a gateway.networking.k8s.io/ReferenceGrant object
is required in the referent namespace to allow that
namespace's owner to accept the reference. See the
ReferenceGrant documentation for details. (Alpha)
This field requires the CrossNamespaceVolumeDataSource
feature gate to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify resource
requirements that are lower than previous value but
must still be higher than capacity recorded in the status
field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used by
this container. \n This is an alpha field and requires
enabling the DynamicResourceAllocation feature gate.
\n This field is immutable. It can only be set for
containers."
items:
description: ResourceClaim references one entry
in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name of one
entry in pod.spec.resourceClaims of the Pod
where this field is used. It makes that resource
available inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum amount
of compute resources required. If Requests is omitted
for a container, it defaults to Limits if that is
explicitly specified, otherwise to an implementation-defined
value. Requests cannot exceed Limits. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes to
consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values
array must be non-empty. If the operator is
Exists or DoesNotExist, the values array must
be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field
is "key", the operator is "In", and the values array
contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the StorageClass
required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume is
required by the claim. Value of Filesystem is implied
when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference to the
PersistentVolume backing this claim.
type: string
type: object
status:
description: 'Deprecated: this field is never set.'
properties:
accessModes:
description: 'accessModes contains the actual access modes
the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
allocatedResourceStatuses:
additionalProperties:
description: When a controller receives persistentvolume
claim update with ClaimResourceStatus for a resource
that it does not recognize, then it should ignore
that update and let other controllers handle it.
type: string
description: "allocatedResourceStatuses stores status
of resource being resized for the given PVC. Key names
follow standard Kubernetes label syntax. Valid values
are either: * Un-prefixed keys: - storage - the capacity
of the volume. * Custom resources must use implementation-defined
prefixed names such as \"example.com/my-custom-resource\"
Apart from above values - keys that are unprefixed or
have kubernetes.io prefix are considered reserved and
hence may not be used. \n ClaimResourceStatus can be
in any of following states: - ControllerResizeInProgress:
State set when resize controller starts resizing the
volume in control-plane. - ControllerResizeFailed: State
set when resize has failed in resize controller with
a terminal error. - NodeResizePending: State set when
resize controller has finished resizing the volume but
further resizing of volume is needed on the node. -
NodeResizeInProgress: State set when kubelet starts
resizing the volume. - NodeResizeFailed: State set when
resizing has failed in kubelet with a terminal error.
Transient errors don't set NodeResizeFailed. For example:
if expanding a PVC for more capacity - this field can
be one of the following states: - pvc.status.allocatedResourceStatus['storage']
= \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage']
= \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage']
= \"NodeResizeFailed\" When this field is not set, it
means that no resize operation is in progress for the
given PVC. \n A controller that receives PVC update
with previously unknown resourceName or ClaimResourceStatus
should ignore the update for the purpose it was designed.
For example - a controller that only is responsible
for resizing capacity of the volume, should ignore PVC
updates that change other valid resources associated
with PVC. \n This is an alpha field and requires enabling
RecoverVolumeExpansionFailure feature."
type: object
x-kubernetes-map-type: granular
allocatedResources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: "allocatedResources tracks the resources
allocated to a PVC including its capacity. Key names
follow standard Kubernetes label syntax. Valid values
are either: * Un-prefixed keys: - storage - the capacity
of the volume. * Custom resources must use implementation-defined
prefixed names such as \"example.com/my-custom-resource\"
Apart from above values - keys that are unprefixed or
have kubernetes.io prefix are considered reserved and
hence may not be used. \n Capacity reported here may
be larger than the actual capacity when a volume expansion
operation is requested. For storage quota, the larger
value from allocatedResources and PVC.spec.resources
is used. If allocatedResources is not set, PVC.spec.resources
alone is used for quota calculation. If a volume expansion
capacity request is lowered, allocatedResources is only
lowered if there are no expansion operations in progress
and if the actual volume capacity is equal or lower
than the requested capacity. \n A controller that receives
PVC update with previously unknown resourceName should
ignore the update for the purpose it was designed. For
example - a controller that only is responsible for
resizing capacity of the volume, should ignore PVC updates
that change other valid resources associated with PVC.
\n This is an alpha field and requires enabling RecoverVolumeExpansionFailure
feature."
type: object
capacity:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: capacity represents the actual resources
of the underlying volume.
type: object
conditions:
description: conditions is the current Condition of persistent
volume claim. If underlying persistent volume is being
resized then the Condition will be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
description: lastProbeTime is the time we probed
the condition.
format: date-time
type: string
lastTransitionTime:
description: lastTransitionTime is the time the
condition transitioned from one status to another.
format: date-time
type: string
message:
description: message is the human-readable message
indicating details about last transition.
type: string
reason:
description: reason is a unique, this should be
a short, machine understandable string that gives
the reason for condition's last transition. If
it reports "ResizeStarted" that means the underlying
persistent volume is being resized.
type: string
status:
type: string
type:
description: PersistentVolumeClaimConditionType
is a valid value of PersistentVolumeClaimCondition.Type
type: string
required:
- status
- type
type: object
type: array
phase:
description: phase represents the current phase of PersistentVolumeClaim.
type: string
type: object
type: object
type: object
tolerations:
description: If specified, the pod's tolerations.
items:
description: The pod this Toleration is attached to tolerates any
taint that matches the triple <key,value,effect> using the matching
operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match. Empty
means match all taint effects. When specified, allowed values
are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies
to. Empty means match all taint keys. If the key is empty,
operator must be Exists; this combination means to match all
values and all keys.
type: string
operator:
description: Operator represents a key's relationship to the
value. Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod
can tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of time
the toleration (which must be of effect NoExecute, otherwise
this field is ignored) tolerates the taint. By default, it
is not set, which means tolerate the taint forever (do not
evict). Zero and negative values will be treated as 0 (evict
immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches
to. If the operator is Exists, the value should be empty,
otherwise just a regular string.
type: string
type: object
type: array
topologySpreadConstraints:
description: If specified, the pod's topology spread constraints.
items:
description: TopologySpreadConstraint specifies how to spread matching
pods among the given topology.
properties:
labelSelector:
description: LabelSelector is used to find matching pods. Pods
that match this label selector are counted to determine the
number of pods in their corresponding topology domain.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
matchLabelKeys:
description: "MatchLabelKeys is a set of pod label keys to select
the pods over which spreading will be calculated. The keys
are used to lookup values from the incoming pod labels, those
key-value labels are ANDed with labelSelector to select the
group of existing pods over which spreading will be calculated
for the incoming pod. The same key is forbidden to exist in
both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot
be set when LabelSelector isn't set. Keys that don't exist
in the incoming pod labels will be ignored. A null or empty
list means only match against labelSelector. \n This is a
beta field and requires the MatchLabelKeysInPodTopologySpread
feature gate to be enabled (enabled by default)."
items:
type: string
type: array
x-kubernetes-list-type: atomic
maxSkew:
description: 'MaxSkew describes the degree to which pods may
be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`,
it is the maximum permitted difference between the number
of matching pods in the target topology and the global minimum.
The global minimum is the minimum number of matching pods
in an eligible domain or zero if the number of eligible domains
is less than MinDomains. For example, in a 3-zone cluster,
MaxSkew is set to 1, and pods with the same labelSelector
spread as 2/2/1: In this case, the global minimum is 1. |
zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew
is 1, incoming pod can only be scheduled to zone3 to become
2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1)
on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming
pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`,
it is used to give higher precedence to topologies that satisfy
it. It''s a required field. Default value is 1 and 0 is not
allowed.'
format: int32
type: integer
minDomains:
description: "MinDomains indicates a minimum number of eligible
domains. When the number of eligible domains with matching
topology keys is less than minDomains, Pod Topology Spread
treats \"global minimum\" as 0, and then the calculation of
Skew is performed. And when the number of eligible domains
with matching topology keys equals or greater than minDomains,
this value has no effect on scheduling. As a result, when
the number of eligible domains is less than minDomains, scheduler
won't schedule more than maxSkew Pods to those domains. If
value is nil, the constraint behaves as if MinDomains is equal
to 1. Valid values are integers greater than 0. When value
is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For
example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains
is set to 5 and pods with the same labelSelector spread as
2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P |
The number of domains is less than 5(MinDomains), so \"global
minimum\" is treated as 0. In this situation, new pod with
the same labelSelector cannot be scheduled, because computed
skew will be 3(3 - 0) if new Pod is scheduled to any of the
three zones, it will violate MaxSkew. \n This is a beta field
and requires the MinDomainsInPodTopologySpread feature gate
to be enabled (enabled by default)."
format: int32
type: integer
nodeAffinityPolicy:
description: "NodeAffinityPolicy indicates how we will treat
Pod's nodeAffinity/nodeSelector when calculating pod topology
spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector
are included in the calculations. - Ignore: nodeAffinity/nodeSelector
are ignored. All nodes are included in the calculations. \n
If this value is nil, the behavior is equivalent to the Honor
policy. This is a beta-level feature default enabled by the
NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
nodeTaintsPolicy:
description: "NodeTaintsPolicy indicates how we will treat node
taints when calculating pod topology spread skew. Options
are: - Honor: nodes without taints, along with tainted nodes
for which the incoming pod has a toleration, are included.
- Ignore: node taints are ignored. All nodes are included.
\n If this value is nil, the behavior is equivalent to the
Ignore policy. This is a beta-level feature default enabled
by the NodeInclusionPolicyInPodTopologySpread feature flag."
type: string
topologyKey:
description: TopologyKey is the key of node labels. Nodes that
have a label with this key and identical values are considered
to be in the same topology. We consider each <key, value>
as a "bucket", and try to put balanced number of pods into
each bucket. We define a domain as a particular instance of
a topology. Also, we define an eligible domain as a domain
whose nodes meet the requirements of nodeAffinityPolicy and
nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname",
each Node is a domain of that topology. And, if TopologyKey
is "topology.kubernetes.io/zone", each zone is a domain of
that topology. It's a required field.
type: string
whenUnsatisfiable:
description: 'WhenUnsatisfiable indicates how to deal with a
pod if it doesn''t satisfy the spread constraint. - DoNotSchedule
(default) tells the scheduler not to schedule it. - ScheduleAnyway
tells the scheduler to schedule the pod in any location, but
giving higher precedence to topologies that would help reduce
the skew. A constraint is considered "Unsatisfiable" for an
incoming pod if and only if every possible node assignment
for that pod would violate "MaxSkew" on some topology. For
example, in a 3-zone cluster, MaxSkew is set to 1, and pods
with the same labelSelector spread as 3/1/1: | zone1 | zone2
| zone3 | | P P P | P | P | If WhenUnsatisfiable is
set to DoNotSchedule, incoming pod can only be scheduled to
zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on
zone2(zone3) satisfies MaxSkew(1). In other words, the cluster
can still be imbalanced, but scheduler won''t make it *more*
imbalanced. It''s a required field.'
type: string
required:
- maxSkew
- topologyKey
- whenUnsatisfiable
type: object
type: array
tracingConfig:
description: TracingConfig configures tracing in Thanos. This is an
experimental feature, it may change in any upcoming release in a
breaking way.
properties:
key:
description: The key of the secret to select from. Must be a
valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
tracingConfigFile:
description: TracingConfig specifies the path of the tracing configuration
file. When used alongside with TracingConfig, TracingConfigFile
takes precedence.
type: string
version:
description: Version of Thanos to be deployed.
type: string
volumeMounts:
description: VolumeMounts allows configuration of additional VolumeMounts
on the output StatefulSet definition. VolumeMounts specified will
be appended to other VolumeMounts in the ruler container, that are
generated as a result of StorageSpec objects.
items:
description: VolumeMount describes a mounting of a Volume within
a container.
properties:
mountPath:
description: Path within the container at which the volume should
be mounted. Must not contain ':'.
type: string
mountPropagation:
description: mountPropagation determines how mounts are propagated
from the host to container and the other way around. When
not set, MountPropagationNone is used. This field is beta
in 1.10.
type: string
name:
description: This must match the Name of a Volume.
type: string
readOnly:
description: Mounted read-only if true, read-write otherwise
(false or unspecified). Defaults to false.
type: boolean
subPath:
description: Path within the volume from which the container's
volume should be mounted. Defaults to "" (volume's root).
type: string
subPathExpr:
description: Expanded path within the volume from which the
container's volume should be mounted. Behaves similarly to
SubPath but environment variable references $(VAR_NAME) are
expanded using the container's environment. Defaults to ""
(volume's root). SubPathExpr and SubPath are mutually exclusive.
type: string
required:
- mountPath
- name
type: object
type: array
volumes:
description: Volumes allows configuration of additional volumes on
the output StatefulSet definition. Volumes specified will be appended
to other volumes that are generated as a result of StorageSpec objects.
items:
description: Volume represents a named volume in a pod that may
be accessed by any container in the pod.
properties:
awsElasticBlockStore:
description: 'awsElasticBlockStore represents an AWS Disk resource
that is attached to a kubelet''s host machine and then exposed
to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
properties:
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
partition:
description: 'partition is the partition in the volume that
you want to mount. If omitted, the default is to mount
by volume name. Examples: For volume /dev/sda1, you specify
the partition as "1". Similarly, the volume partition
for /dev/sda is "0" (or you can leave the property empty).'
format: int32
type: integer
readOnly:
description: 'readOnly value true will force the readOnly
setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
type: boolean
volumeID:
description: 'volumeID is unique ID of the persistent disk
resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
type: string
required:
- volumeID
type: object
azureDisk:
description: azureDisk represents an Azure Data Disk mount on
the host and bind mount to the pod.
properties:
cachingMode:
description: 'cachingMode is the Host Caching mode: None,
Read Only, Read Write.'
type: string
diskName:
description: diskName is the Name of the data disk in the
blob storage
type: string
diskURI:
description: diskURI is the URI of data disk in the blob
storage
type: string
fsType:
description: fsType is Filesystem type to mount. Must be
a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
kind:
description: 'kind expected values are Shared: multiple
blob disks per storage account Dedicated: single blob
disk per storage account Managed: azure managed data
disk (only in managed availability set). defaults to shared'
type: string
readOnly:
description: readOnly Defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
required:
- diskName
- diskURI
type: object
azureFile:
description: azureFile represents an Azure File Service mount
on the host and bind mount to the pod.
properties:
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretName:
description: secretName is the name of secret that contains
Azure Storage Account Name and Key
type: string
shareName:
description: shareName is the azure share Name
type: string
required:
- secretName
- shareName
type: object
cephfs:
description: cephFS represents a Ceph FS mount on the host that
shares a pod's lifetime
properties:
monitors:
description: 'monitors is Required: Monitors is a collection
of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
items:
type: string
type: array
path:
description: 'path is Optional: Used as the mounted root,
rather than the full Ceph tree, default is /'
type: string
readOnly:
description: 'readOnly is Optional: Defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: boolean
secretFile:
description: 'secretFile is Optional: SecretFile is the
path to key ring for User, default is /etc/ceph/user.secret
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: string
secretRef:
description: 'secretRef is Optional: SecretRef is reference
to the authentication secret for User, default is empty.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
user:
description: 'user is optional: User is the rados user name,
default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
type: string
required:
- monitors
type: object
cinder:
description: 'cinder represents a cinder volume attached and
mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
properties:
fsType:
description: 'fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to
be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: string
readOnly:
description: 'readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: boolean
secretRef:
description: 'secretRef is optional: points to a secret
object containing parameters used to connect to OpenStack.'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
volumeID:
description: 'volumeID used to identify the volume in cinder.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
type: string
required:
- volumeID
type: object
configMap:
description: configMap represents a configMap that should populate
this volume
properties:
defaultMode:
description: 'defaultMode is optional: mode bits used to
set permissions on created files by default. Must be an
octal value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: items if unspecified, each key-value pair in
the Data field of the referenced ConfigMap will be projected
into the volume as a file whose name is the key and content
is the value. If specified, the listed keys will be projected
into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in
the ConfigMap, the volume setup will error unless it is
marked optional. Paths must be relative and may not contain
the '..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits used to
set permissions on this file. Must be an octal value
between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. If not
specified, the volume defaultMode will be used.
This might be in conflict with other options that
affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of the file
to map the key to. May not be an absolute path.
May not contain the path element '..'. May not start
with the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: optional specify whether the ConfigMap or its
keys must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
csi:
description: csi (Container Storage Interface) represents ephemeral
storage that is handled by certain external CSI drivers (Beta
feature).
properties:
driver:
description: driver is the name of the CSI driver that handles
this volume. Consult with your admin for the correct name
as registered in the cluster.
type: string
fsType:
description: fsType to mount. Ex. "ext4", "xfs", "ntfs".
If not provided, the empty value is passed to the associated
CSI driver which will determine the default filesystem
to apply.
type: string
nodePublishSecretRef:
description: nodePublishSecretRef is a reference to the
secret object containing sensitive information to pass
to the CSI driver to complete the CSI NodePublishVolume
and NodeUnpublishVolume calls. This field is optional,
and may be empty if no secret is required. If the secret
object contains more than one secret, all secret references
are passed.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
readOnly:
description: readOnly specifies a read-only configuration
for the volume. Defaults to false (read/write).
type: boolean
volumeAttributes:
additionalProperties:
type: string
description: volumeAttributes stores driver-specific properties
that are passed to the CSI driver. Consult your driver's
documentation for supported values.
type: object
required:
- driver
type: object
downwardAPI:
description: downwardAPI represents downward API about the pod
that should populate this volume
properties:
defaultMode:
description: 'Optional: mode bits used to set permissions
on created files by default. Must be an octal
value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: Items is a list of downward API volume file
items:
description: DownwardAPIVolumeFile represents information
to create the file containing the pod field
properties:
fieldRef:
description: 'Required: Selects a field of the pod:
only annotations, labels, name and namespace are
supported.'
properties:
apiVersion:
description: Version of the schema the FieldPath
is written in terms of, defaults to "v1".
type: string
fieldPath:
description: Path of the field to select in the
specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
mode:
description: 'Optional: mode bits used to set permissions
on this file, must be an octal value between 0000
and 0777 or a decimal value between 0 and 511. YAML
accepts both octal and decimal values, JSON requires
decimal values for mode bits. If not specified,
the volume defaultMode will be used. This might
be in conflict with other options that affect the
file mode, like fsGroup, and the result can be other
mode bits set.'
format: int32
type: integer
path:
description: 'Required: Path is the relative path
name of the file to be created. Must not be absolute
or contain the ''..'' path. Must be utf-8 encoded.
The first item of the relative path must not start
with ''..'''
type: string
resourceFieldRef:
description: 'Selects a resource of the container:
only resources limits and requests (limits.cpu,
limits.memory, requests.cpu and requests.memory)
are currently supported.'
properties:
containerName:
description: 'Container name: required for volumes,
optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format of the
exposed resources, defaults to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
required:
- path
type: object
type: array
type: object
emptyDir:
description: 'emptyDir represents a temporary directory that
shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
properties:
medium:
description: 'medium represents what type of storage medium
should back this directory. The default is "" which means
to use the node''s default medium. Must be an empty string
(default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
type: string
sizeLimit:
anyOf:
- type: integer
- type: string
description: 'sizeLimit is the total amount of local storage
required for this EmptyDir volume. The size limit is also
applicable for memory medium. The maximum usage on memory
medium EmptyDir would be the minimum value between the
SizeLimit specified here and the sum of memory limits
of all containers in a pod. The default is nil which means
that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
ephemeral:
description: "ephemeral represents a volume that is handled
by a cluster storage driver. The volume's lifecycle is tied
to the pod that defines it - it will be created before the
pod starts, and deleted when the pod is removed. \n Use this
if: a) the volume is only needed while the pod runs, b) features
of normal volumes like restoring from snapshot or capacity
tracking are needed, c) the storage driver is specified through
a storage class, and d) the storage driver supports dynamic
volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource
for more information on the connection between this volume
type and PersistentVolumeClaim). \n Use PersistentVolumeClaim
or one of the vendor-specific APIs for volumes that persist
for longer than the lifecycle of an individual pod. \n Use
CSI for light-weight local ephemeral volumes if the CSI driver
is meant to be used that way - see the documentation of the
driver for more information. \n A pod can use both types of
ephemeral volumes and persistent volumes at the same time."
properties:
volumeClaimTemplate:
description: "Will be used to create a stand-alone PVC to
provision the volume. The pod in which this EphemeralVolumeSource
is embedded will be the owner of the PVC, i.e. the PVC
will be deleted together with the pod. The name of the
PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry.
Pod validation will reject the pod if the concatenated
name is not valid for a PVC (for example, too long). \n
An existing PVC with that name that is not owned by the
pod will *not* be used for the pod to avoid using an unrelated
volume by mistake. Starting the pod is then blocked until
the unrelated PVC is removed. If such a pre-created PVC
is meant to be used by the pod, the PVC has to updated
with an owner reference to the pod once the pod exists.
Normally this should not be necessary, but it may be useful
when manually reconstructing a broken cluster. \n This
field is read-only and no changes will be made by Kubernetes
to the PVC after it has been created. \n Required, must
not be nil."
properties:
metadata:
description: May contain labels and annotations that
will be copied into the PVC when creating it. No other
fields are allowed and will be rejected during validation.
type: object
spec:
description: The specification for the PersistentVolumeClaim.
The entire content is copied unchanged into the PVC
that gets created from this template. The same fields
as in a PersistentVolumeClaim are also valid here.
properties:
accessModes:
description: 'accessModes contains the desired access
modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
items:
type: string
type: array
dataSource:
description: 'dataSource field can be used to specify
either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
* An existing PVC (PersistentVolumeClaim) If the
provisioner or an external controller can support
the specified data source, it will create a new
volume based on the contents of the specified
data source. When the AnyVolumeDataSource feature
gate is enabled, dataSource contents will be copied
to dataSourceRef, and dataSourceRef contents will
be copied to dataSource when dataSourceRef.namespace
is not specified. If the namespace is specified,
then dataSourceRef will not be copied to dataSource.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API
group. For any other third-party types, APIGroup
is required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
dataSourceRef:
description: 'dataSourceRef specifies the object
from which to populate the volume with data, if
a non-empty volume is desired. This may be any
object from a non-empty API group (non core object)
or a PersistentVolumeClaim object. When this field
is specified, volume binding will only succeed
if the type of the specified object matches some
installed volume populator or dynamic provisioner.
This field will replace the functionality of the
dataSource field and as such if both fields are
non-empty, they must have the same value. For
backwards compatibility, when namespace isn''t
specified in dataSourceRef, both fields (dataSource
and dataSourceRef) will be set to the same value
automatically if one of them is empty and the
other is non-empty. When namespace is specified
in dataSourceRef, dataSource isn''t set to the
same value and must be empty. There are three
important differences between dataSource and dataSourceRef:
* While dataSource only allows two specific types
of objects, dataSourceRef allows any non-core
object, as well as PersistentVolumeClaim objects.
* While dataSource ignores disallowed values (dropping
them), dataSourceRef preserves all values, and
generates an error if a disallowed value is specified.
* While dataSource only allows local objects,
dataSourceRef allows objects in any namespaces.
(Beta) Using this field requires the AnyVolumeDataSource
feature gate to be enabled. (Alpha) Using the
namespace field of dataSourceRef requires the
CrossNamespaceVolumeDataSource feature gate to
be enabled.'
properties:
apiGroup:
description: APIGroup is the group for the resource
being referenced. If APIGroup is not specified,
the specified Kind must be in the core API
group. For any other third-party types, APIGroup
is required.
type: string
kind:
description: Kind is the type of resource being
referenced
type: string
name:
description: Name is the name of resource being
referenced
type: string
namespace:
description: Namespace is the namespace of resource
being referenced Note that when a namespace
is specified, a gateway.networking.k8s.io/ReferenceGrant
object is required in the referent namespace
to allow that namespace's owner to accept
the reference. See the ReferenceGrant documentation
for details. (Alpha) This field requires the
CrossNamespaceVolumeDataSource feature gate
to be enabled.
type: string
required:
- kind
- name
type: object
resources:
description: 'resources represents the minimum resources
the volume should have. If RecoverVolumeExpansionFailure
feature is enabled users are allowed to specify
resource requirements that are lower than previous
value but must still be higher than capacity recorded
in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
properties:
claims:
description: "Claims lists the names of resources,
defined in spec.resourceClaims, that are used
by this container. \n This is an alpha field
and requires enabling the DynamicResourceAllocation
feature gate. \n This field is immutable.
It can only be set for containers."
items:
description: ResourceClaim references one
entry in PodSpec.ResourceClaims.
properties:
name:
description: Name must match the name
of one entry in pod.spec.resourceClaims
of the Pod where this field is used.
It makes that resource available inside
a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Limits describes the maximum amount
of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum
amount of compute resources required. If Requests
is omitted for a container, it defaults to
Limits if that is explicitly specified, otherwise
to an implementation-defined value. Requests
cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
description: selector is a label query over volumes
to consider for binding.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
storageClassName:
description: 'storageClassName is the name of the
StorageClass required by the claim. More info:
https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
type: string
volumeMode:
description: volumeMode defines what type of volume
is required by the claim. Value of Filesystem
is implied when not included in claim spec.
type: string
volumeName:
description: volumeName is the binding reference
to the PersistentVolume backing this claim.
type: string
type: object
required:
- spec
type: object
type: object
fc:
description: fc represents a Fibre Channel resource that is
attached to a kubelet's host machine and then exposed to the
pod.
properties:
fsType:
description: 'fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. TODO: how do we prevent errors in the
filesystem from compromising the machine'
type: string
lun:
description: 'lun is Optional: FC target lun number'
format: int32
type: integer
readOnly:
description: 'readOnly is Optional: Defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.'
type: boolean
targetWWNs:
description: 'targetWWNs is Optional: FC target worldwide
names (WWNs)'
items:
type: string
type: array
wwids:
description: 'wwids Optional: FC volume world wide identifiers
(wwids) Either wwids or combination of targetWWNs and
lun must be set, but not both simultaneously.'
items:
type: string
type: array
type: object
flexVolume:
description: flexVolume represents a generic volume resource
that is provisioned/attached using an exec based plugin.
properties:
driver:
description: driver is the name of the driver to use for
this volume.
type: string
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". The default filesystem depends
on FlexVolume script.
type: string
options:
additionalProperties:
type: string
description: 'options is Optional: this field holds extra
command options if any.'
type: object
readOnly:
description: 'readOnly is Optional: defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.'
type: boolean
secretRef:
description: 'secretRef is Optional: secretRef is reference
to the secret object containing sensitive information
to pass to the plugin scripts. This may be empty if no
secret object is specified. If the secret object contains
more than one secret, all secrets are passed to the plugin
scripts.'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
required:
- driver
type: object
flocker:
description: flocker represents a Flocker volume attached to
a kubelet's host machine. This depends on the Flocker control
service being running
properties:
datasetName:
description: datasetName is Name of the dataset stored as
metadata -> name on the dataset for Flocker should be
considered as deprecated
type: string
datasetUUID:
description: datasetUUID is the UUID of the dataset. This
is unique identifier of a Flocker dataset
type: string
type: object
gcePersistentDisk:
description: 'gcePersistentDisk represents a GCE Disk resource
that is attached to a kubelet''s host machine and then exposed
to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
properties:
fsType:
description: 'fsType is filesystem type of the volume that
you want to mount. Tip: Ensure that the filesystem type
is supported by the host operating system. Examples: "ext4",
"xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
partition:
description: 'partition is the partition in the volume that
you want to mount. If omitted, the default is to mount
by volume name. Examples: For volume /dev/sda1, you specify
the partition as "1". Similarly, the volume partition
for /dev/sda is "0" (or you can leave the property empty).
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
format: int32
type: integer
pdName:
description: 'pdName is unique name of the PD resource in
GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
type: string
readOnly:
description: 'readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk'
type: boolean
required:
- pdName
type: object
gitRepo:
description: 'gitRepo represents a git repository at a particular
revision. DEPRECATED: GitRepo is deprecated. To provision
a container with a git repo, mount an EmptyDir into an InitContainer
that clones the repo using git, then mount the EmptyDir into
the Pod''s container.'
properties:
directory:
description: directory is the target directory name. Must
not contain or start with '..'. If '.' is supplied, the
volume directory will be the git repository. Otherwise,
if specified, the volume will contain the git repository
in the subdirectory with the given name.
type: string
repository:
description: repository is the URL
type: string
revision:
description: revision is the commit hash for the specified
revision.
type: string
required:
- repository
type: object
glusterfs:
description: 'glusterfs represents a Glusterfs mount on the
host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md'
properties:
endpoints:
description: 'endpoints is the endpoint name that details
Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: string
path:
description: 'path is the Glusterfs volume path. More info:
https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: string
readOnly:
description: 'readOnly here will force the Glusterfs volume
to be mounted with read-only permissions. Defaults to
false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
type: boolean
required:
- endpoints
- path
type: object
hostPath:
description: 'hostPath represents a pre-existing file or directory
on the host machine that is directly exposed to the container.
This is generally used for system agents or other privileged
things that are allowed to see the host machine. Most containers
will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
--- TODO(jonesdl) We need to restrict who can use host directory
mounts and who can/can not mount host directories as read/write.'
properties:
path:
description: 'path of the directory on the host. If the
path is a symlink, it will follow the link to the real
path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
type: string
type:
description: 'type for HostPath Volume Defaults to "" More
info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
type: string
required:
- path
type: object
iscsi:
description: 'iscsi represents an ISCSI Disk resource that is
attached to a kubelet''s host machine and then exposed to
the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md'
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether support iSCSI
Discovery CHAP authentication
type: boolean
chapAuthSession:
description: chapAuthSession defines whether support iSCSI
Session CHAP authentication
type: boolean
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
initiatorName:
description: initiatorName is the custom iSCSI Initiator
Name. If initiatorName is specified with iscsiInterface
simultaneously, new iSCSI interface : will be created for the connection.
type: string
iqn:
description: iqn is the target iSCSI Qualified Name.
type: string
iscsiInterface:
description: iscsiInterface is the interface Name that uses
an iSCSI transport. Defaults to 'default' (tcp).
type: string
lun:
description: lun represents iSCSI Target Lun number.
format: int32
type: integer
portals:
description: portals is the iSCSI Target Portal List. The
portal is either an IP or ip_addr:port if the port is
other than default (typically TCP ports 860 and 3260).
items:
type: string
type: array
readOnly:
description: readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false.
type: boolean
secretRef:
description: secretRef is the CHAP Secret for iSCSI target
and initiator authentication
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
targetPortal:
description: targetPortal is iSCSI Target Portal. The Portal
is either an IP or ip_addr:port if the port is other than
default (typically TCP ports 860 and 3260).
type: string
required:
- iqn
- lun
- targetPortal
type: object
name:
description: 'name of the volume. Must be a DNS_LABEL and unique
within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
nfs:
description: 'nfs represents an NFS mount on the host that shares
a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
properties:
path:
description: 'path that is exported by the NFS server. More
info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: string
readOnly:
description: 'readOnly here will force the NFS export to
be mounted with read-only permissions. Defaults to false.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: boolean
server:
description: 'server is the hostname or IP address of the
NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs'
type: string
required:
- path
- server
type: object
persistentVolumeClaim:
description: 'persistentVolumeClaimVolumeSource represents a
reference to a PersistentVolumeClaim in the same namespace.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
properties:
claimName:
description: 'claimName is the name of a PersistentVolumeClaim
in the same namespace as the pod using this volume. More
info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
type: string
readOnly:
description: readOnly Will force the ReadOnly setting in
VolumeMounts. Default false.
type: boolean
required:
- claimName
type: object
photonPersistentDisk:
description: photonPersistentDisk represents a PhotonController
persistent disk attached and mounted on kubelets host machine
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
pdID:
description: pdID is the ID that identifies Photon Controller
persistent disk
type: string
required:
- pdID
type: object
portworxVolume:
description: portworxVolume represents a portworx volume attached
and mounted on kubelets host machine
properties:
fsType:
description: fSType represents the filesystem type to mount
Must be a filesystem type supported by the host operating
system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
volumeID:
description: volumeID uniquely identifies a Portworx volume
type: string
required:
- volumeID
type: object
projected:
description: projected items for all in one resources secrets,
configmaps, and downward API
properties:
defaultMode:
description: defaultMode are the mode bits used to set permissions
on created files by default. Must be an octal value between
0000 and 0777 or a decimal value between 0 and 511. YAML
accepts both octal and decimal values, JSON requires decimal
values for mode bits. Directories within the path are
not affected by this setting. This might be in conflict
with other options that affect the file mode, like fsGroup,
and the result can be other mode bits set.
format: int32
type: integer
sources:
description: sources is the list of volume projections
items:
description: Projection that may be projected along with
other supported volume types
properties:
configMap:
description: configMap information about the configMap
data to project
properties:
items:
description: items if unspecified, each key-value
pair in the Data field of the referenced ConfigMap
will be projected into the volume as a file
whose name is the key and content is the value.
If specified, the listed keys will be projected
into the specified paths, and unlisted keys
will not be present. If a key is specified which
is not present in the ConfigMap, the volume
setup will error unless it is marked optional.
Paths must be relative and may not contain the
'..' path or start with '..'.
items:
description: Maps a string key to a path within
a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits
used to set permissions on this file.
Must be an octal value between 0000 and
0777 or a decimal value between 0 and
511. YAML accepts both octal and decimal
values, JSON requires decimal values for
mode bits. If not specified, the volume
defaultMode will be used. This might be
in conflict with other options that affect
the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of
the file to map the key to. May not be
an absolute path. May not contain the
path element '..'. May not start with
the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: optional specify whether the ConfigMap
or its keys must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
downwardAPI:
description: downwardAPI information about the downwardAPI
data to project
properties:
items:
description: Items is a list of DownwardAPIVolume
file
items:
description: DownwardAPIVolumeFile represents
information to create the file containing
the pod field
properties:
fieldRef:
description: 'Required: Selects a field
of the pod: only annotations, labels,
name and namespace are supported.'
properties:
apiVersion:
description: Version of the schema the
FieldPath is written in terms of,
defaults to "v1".
type: string
fieldPath:
description: Path of the field to select
in the specified API version.
type: string
required:
- fieldPath
type: object
x-kubernetes-map-type: atomic
mode:
description: 'Optional: mode bits used to
set permissions on this file, must be
an octal value between 0000 and 0777 or
a decimal value between 0 and 511. YAML
accepts both octal and decimal values,
JSON requires decimal values for mode
bits. If not specified, the volume defaultMode
will be used. This might be in conflict
with other options that affect the file
mode, like fsGroup, and the result can
be other mode bits set.'
format: int32
type: integer
path:
description: 'Required: Path is the relative
path name of the file to be created. Must
not be absolute or contain the ''..''
path. Must be utf-8 encoded. The first
item of the relative path must not start
with ''..'''
type: string
resourceFieldRef:
description: 'Selects a resource of the
container: only resources limits and requests
(limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
properties:
containerName:
description: 'Container name: required
for volumes, optional for env vars'
type: string
divisor:
anyOf:
- type: integer
- type: string
description: Specifies the output format
of the exposed resources, defaults
to "1"
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
resource:
description: 'Required: resource to
select'
type: string
required:
- resource
type: object
x-kubernetes-map-type: atomic
required:
- path
type: object
type: array
type: object
secret:
description: secret information about the secret data
to project
properties:
items:
description: items if unspecified, each key-value
pair in the Data field of the referenced Secret
will be projected into the volume as a file
whose name is the key and content is the value.
If specified, the listed keys will be projected
into the specified paths, and unlisted keys
will not be present. If a key is specified which
is not present in the Secret, the volume setup
will error unless it is marked optional. Paths
must be relative and may not contain the '..'
path or start with '..'.
items:
description: Maps a string key to a path within
a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits
used to set permissions on this file.
Must be an octal value between 0000 and
0777 or a decimal value between 0 and
511. YAML accepts both octal and decimal
values, JSON requires decimal values for
mode bits. If not specified, the volume
defaultMode will be used. This might be
in conflict with other options that affect
the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of
the file to map the key to. May not be
an absolute path. May not contain the
path element '..'. May not start with
the string '..'.
type: string
required:
- key
- path
type: object
type: array
name:
description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind,
uid?'
type: string
optional:
description: optional field specify whether the
Secret or its key must be defined
type: boolean
type: object
x-kubernetes-map-type: atomic
serviceAccountToken:
description: serviceAccountToken is information about
the serviceAccountToken data to project
properties:
audience:
description: audience is the intended audience
of the token. A recipient of a token must identify
itself with an identifier specified in the audience
of the token, and otherwise should reject the
token. The audience defaults to the identifier
of the apiserver.
type: string
expirationSeconds:
description: expirationSeconds is the requested
duration of validity of the service account
token. As the token approaches expiration, the
kubelet volume plugin will proactively rotate
the service account token. The kubelet will
start trying to rotate the token if the token
is older than 80 percent of its time to live
or if the token is older than 24 hours.Defaults
to 1 hour and must be at least 10 minutes.
format: int64
type: integer
path:
description: path is the path relative to the
mount point of the file to project the token
into.
type: string
required:
- path
type: object
type: object
type: array
type: object
quobyte:
description: quobyte represents a Quobyte mount on the host
that shares a pod's lifetime
properties:
group:
description: group to map volume access to Default is no
group
type: string
readOnly:
description: readOnly here will force the Quobyte volume
to be mounted with read-only permissions. Defaults to
false.
type: boolean
registry:
description: registry represents a single or multiple Quobyte
Registry services specified as a string as host:port pair
(multiple entries are separated with commas) which acts
as the central registry for volumes
type: string
tenant:
description: tenant owning the given Quobyte volume in the
Backend Used with dynamically provisioned Quobyte volumes,
value is set by the plugin
type: string
user:
description: user to map volume access to Defaults to serivceaccount
user
type: string
volume:
description: volume is a string that references an already
created Quobyte volume by name.
type: string
required:
- registry
- volume
type: object
rbd:
description: 'rbd represents a Rados Block Device mount on the
host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md'
properties:
fsType:
description: 'fsType is the filesystem type of the volume
that you want to mount. Tip: Ensure that the filesystem
type is supported by the host operating system. Examples:
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
TODO: how do we prevent errors in the filesystem from
compromising the machine'
type: string
image:
description: 'image is the rados image name. More info:
https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
keyring:
description: 'keyring is the path to key ring for RBDUser.
Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
monitors:
description: 'monitors is a collection of Ceph monitors.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
items:
type: string
type: array
pool:
description: 'pool is the rados pool name. Default is rbd.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
readOnly:
description: 'readOnly here will force the ReadOnly setting
in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: boolean
secretRef:
description: 'secretRef is name of the authentication secret
for RBDUser. If provided overrides keyring. Default is
nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
user:
description: 'user is the rados user name. Default is admin.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
type: string
required:
- image
- monitors
type: object
scaleIO:
description: scaleIO represents a ScaleIO persistent volume
attached and mounted on Kubernetes nodes.
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Default is "xfs".
type: string
gateway:
description: gateway is the host address of the ScaleIO
API Gateway.
type: string
protectionDomain:
description: protectionDomain is the name of the ScaleIO
Protection Domain for the configured storage.
type: string
readOnly:
description: readOnly Defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretRef:
description: secretRef references to the secret for ScaleIO
user and other sensitive information. If this is not provided,
Login operation will fail.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
sslEnabled:
description: sslEnabled Flag enable/disable SSL communication
with Gateway, default false
type: boolean
storageMode:
description: storageMode indicates whether the storage for
a volume should be ThickProvisioned or ThinProvisioned.
Default is ThinProvisioned.
type: string
storagePool:
description: storagePool is the ScaleIO Storage Pool associated
with the protection domain.
type: string
system:
description: system is the name of the storage system as
configured in ScaleIO.
type: string
volumeName:
description: volumeName is the name of a volume already
created in the ScaleIO system that is associated with
this volume source.
type: string
required:
- gateway
- secretRef
- system
type: object
secret:
description: 'secret represents a secret that should populate
this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'
properties:
defaultMode:
description: 'defaultMode is Optional: mode bits used to
set permissions on created files by default. Must be an
octal value between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. Defaults to
0644. Directories within the path are not affected by
this setting. This might be in conflict with other options
that affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
items:
description: items If unspecified, each key-value pair in
the Data field of the referenced Secret will be projected
into the volume as a file whose name is the key and content
is the value. If specified, the listed keys will be projected
into the specified paths, and unlisted keys will not be
present. If a key is specified which is not present in
the Secret, the volume setup will error unless it is marked
optional. Paths must be relative and may not contain the
'..' path or start with '..'.
items:
description: Maps a string key to a path within a volume.
properties:
key:
description: key is the key to project.
type: string
mode:
description: 'mode is Optional: mode bits used to
set permissions on this file. Must be an octal value
between 0000 and 0777 or a decimal value between
0 and 511. YAML accepts both octal and decimal values,
JSON requires decimal values for mode bits. If not
specified, the volume defaultMode will be used.
This might be in conflict with other options that
affect the file mode, like fsGroup, and the result
can be other mode bits set.'
format: int32
type: integer
path:
description: path is the relative path of the file
to map the key to. May not be an absolute path.
May not contain the path element '..'. May not start
with the string '..'.
type: string
required:
- key
- path
type: object
type: array
optional:
description: optional field specify whether the Secret or
its keys must be defined
type: boolean
secretName:
description: 'secretName is the name of the secret in the
pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret'
type: string
type: object
storageos:
description: storageOS represents a StorageOS volume attached
and mounted on Kubernetes nodes.
properties:
fsType:
description: fsType is the filesystem type to mount. Must
be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
readOnly:
description: readOnly defaults to false (read/write). ReadOnly
here will force the ReadOnly setting in VolumeMounts.
type: boolean
secretRef:
description: secretRef specifies the secret to use for obtaining
the StorageOS API credentials. If not specified, default
values will be attempted.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
x-kubernetes-map-type: atomic
volumeName:
description: volumeName is the human-readable name of the
StorageOS volume. Volume names are only unique within
a namespace.
type: string
volumeNamespace:
description: volumeNamespace specifies the scope of the
volume within StorageOS. If no namespace is specified
then the Pod's namespace will be used. This allows the
Kubernetes name scoping to be mirrored within StorageOS
for tighter integration. Set VolumeName to any name to
override the default behaviour. Set to "default" if you
are not using namespaces within StorageOS. Namespaces
that do not pre-exist within StorageOS will be created.
type: string
type: object
vsphereVolume:
description: vsphereVolume represents a vSphere volume attached
and mounted on kubelets host machine
properties:
fsType:
description: fsType is filesystem type to mount. Must be
a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4"
if unspecified.
type: string
storagePolicyID:
description: storagePolicyID is the storage Policy Based
Management (SPBM) profile ID associated with the StoragePolicyName.
type: string
storagePolicyName:
description: storagePolicyName is the storage Policy Based
Management (SPBM) profile name.
type: string
volumePath:
description: volumePath is the path that identifies vSphere
volume vmdk
type: string
required:
- volumePath
type: object
required:
- name
type: object
type: array
type: object
status:
description: 'Most recent observed status of the ThanosRuler cluster.
Read-only. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
properties:
availableReplicas:
description: Total number of available pods (ready for at least minReadySeconds)
targeted by this ThanosRuler deployment.
format: int32
type: integer
conditions:
description: The current state of the Alertmanager object.
items:
description: Condition represents the state of the resources associated
with the Prometheus, Alertmanager or ThanosRuler resource.
properties:
lastTransitionTime:
description: lastTransitionTime is the time of the last update
to the current status property.
format: date-time
type: string
message:
description: Human-readable message indicating details for the
condition's last transition.
type: string
observedGeneration:
description: ObservedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if `.metadata.generation`
is currently 12, but the `.status.conditions[].observedGeneration`
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
type: integer
reason:
description: Reason for the condition's last transition.
type: string
status:
description: Status of the condition.
type: string
type:
description: Type of the condition being reported.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
paused:
description: Represents whether any actions on the underlying managed
objects are being performed. Only delete actions will be performed.
type: boolean
replicas:
description: Total number of non-terminated pods targeted by this
ThanosRuler deployment (their labels match the selector).
format: int32
type: integer
unavailableReplicas:
description: Total number of unavailable pods targeted by this ThanosRuler
deployment.
format: int32
type: integer
updatedReplicas:
description: Total number of non-terminated pods targeted by this
ThanosRuler deployment that have the desired version spec.
format: int32
type: integer
required:
- availableReplicas
- paused
- replicas
- unavailableReplicas
- updatedReplicas
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
{{ end }}
================================================
FILE: third_party/kube-prometheus-stack/BUILD.bazel
================================================
# License of the bundled helm chart:
# https://github.com/helm/charts/blob/master/LICENSE
# Apache license
licenses(["notice"])
# This chart was downloaded by running:
# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
# helm fetch prometheus-community/kube-prometheus-stack --version=x.y.z
# then edit VERSION= in update_crd.sh
# then run update_crd.sh to generate new 00-crds.yaml
#
# Export the chart tarball and the generated CRD manifests so other
# packages (e.g. the cloud base chart) can reference them directly.
exports_files(
    glob([
        "*.tgz",
        "*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
================================================
FILE: third_party/kube-prometheus-stack/update_crd.sh
================================================
#!/bin/bash
#
# Regenerates 00-crds.yaml and 01-crds.yaml from the upstream
# prometheus-operator CRD manifests.
#
# Match VERSION to the app version reported by this command:
# helm search repo prometheus-community/kube-prometheus-stack --version='x.y.z'

# Abort on any failed download instead of silently leaving behind an
# empty or truncated CRD file.
set -euo pipefail

VERSION=0.71
# https://github.com/prometheus-operator/prometheus-operator/tree/main/example/prometheus-operator-crd
# TODO(ensonic): can we take this directly from the chart?
BASEURL="https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-${VERSION}/example/prometheus-operator-crd/monitoring.coreos.com"

# The probes CRD is written to its own file.
# -f: fail on HTTP errors (e.g. 404 for a bad VERSION), -sS: no progress
# bar but still print errors, -L: follow redirects.
OUT=00-crds.yaml
curl -fsSL "${BASEURL}_probes.yaml" > "${OUT}"

# The remaining CRDs are concatenated into one file, wrapped in a helm
# conditional so they are only installed when app management is enabled.
OUT=01-crds.yaml
echo '{{ if eq .Values.app_management "true" }}' > "${OUT}"
for CRD in alertmanagerconfigs alertmanagers prometheuses prometheusrules podmonitors scrapeconfigs servicemonitors thanosrulers; do
  # these already have "---" separators
  curl -fsSL "${BASEURL}_${CRD}.yaml" >> "${OUT}"
done
echo '{{ end }}' >> "${OUT}"
================================================
FILE: third_party/kubernetes_proto/meta/BUILD.bazel
================================================
load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library")
load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
licenses(["notice"])
# Proto definition for k8s.io/apimachinery meta/v1 (see generated.proto;
# locally modified to use google.protobuf.Timestamp, per README.md).
proto_library(
    name = "meta_proto",
    srcs = ["generated.proto"],
    visibility = ["//visibility:public"],
    deps = [
        "//third_party/kubernetes_proto/runtime:runtime_proto",
        "//third_party/kubernetes_proto/schema:schema_proto",
        "@com_google_protobuf//:timestamp_proto",
    ],
)
# Generated Go bindings for meta_proto.
go_proto_library(
    name = "meta_go_proto",
    compilers = ["@io_bazel_rules_go//proto:go_grpc"],
    importpath = "github.com/googlecloudrobotics/core/third_party/kubernetes_proto/meta",
    proto = ":meta_proto",
    visibility = ["//visibility:public"],
    deps = [
        "//third_party/kubernetes_proto/runtime:go_default_library",
        "//third_party/kubernetes_proto/schema:go_default_library",
    ],
)
# Conventionally-named Go library wrapper so other Go targets can depend
# on this package via the standard go_default_library label.
go_library(
    name = "go_default_library",
    embed = [":meta_go_proto"],
    importpath = "github.com/googlecloudrobotics/core/third_party/kubernetes_proto/meta",
    visibility = ["//visibility:public"],
)
# Generated C++ bindings for meta_proto.
cc_proto_library(
    name = "meta_cc_proto",
    visibility = ["//visibility:public"],
    deps = [":meta_proto"],
)
================================================
FILE: third_party/kubernetes_proto/meta/README.md
================================================
# Manual modification of `generated.proto` #
The file `generated.proto` was modified from its original upstream version in
the following way:
All fields of type `Time` have been replaced with identically named fields of
type `google.protobuf.Timestamp`. This modification is necessary to enable
conversion between proto binary format and JSON without unmarshaling to Go
structs as an intermediate step.
================================================
FILE: third_party/kubernetes_proto/meta/generated.proto
================================================
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = "proto2";
package k8s.io.apimachinery.pkg.apis.meta.v1;
// --- BEGIN MANUAL EDIT ---
import "third_party/kubernetes_proto/runtime/generated.proto";
import "third_party/kubernetes_proto/schema/generated.proto";
import "google/protobuf/timestamp.proto";
// --- END MANUAL EDIT ---
// Package-wide variables from generator "generated".
option go_package = "k8s.io/apimachinery/pkg/apis/meta/v1";
// APIGroup contains the name, the supported versions, and the preferred version
// of a group.
message APIGroup {
// name is the name of the group.
optional string name = 1;
// versions are the versions supported in this group.
repeated GroupVersionForDiscovery versions = 2;
// preferredVersion is the version preferred by the API server, which
// probably is the storage version.
// +optional
optional GroupVersionForDiscovery preferredVersion = 3;
// a map of client CIDR to server address that is serving this group.
// This is to help clients reach servers in the most network-efficient way possible.
// Clients can use the appropriate server address as per the CIDR that they match.
// In case of multiple matches, clients should use the longest matching CIDR.
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
// +optional
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
}
// APIGroupList is a list of APIGroup, to allow clients to discover the API at
// /apis.
message APIGroupList {
// groups is a list of APIGroup.
repeated APIGroup groups = 1;
}
// APIResource specifies the name of a resource and whether it is namespaced.
message APIResource {
// name is the plural name of the resource.
optional string name = 1;
// singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely.
// The singularName is more correct for reporting status on a single item and both singular and plural are allowed
// from the kubectl CLI interface.
optional string singularName = 6;
// namespaced indicates if a resource is namespaced or not.
optional bool namespaced = 2;
// group is the preferred group of the resource. Empty implies the group of the containing resource list.
// For subresources, this may have a different value, for example: Scale".
optional string group = 8;
// version is the preferred version of the resource. Empty implies the version of the containing resource list
// For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)".
optional string version = 9;
// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
optional string kind = 3;
// verbs is a list of supported kube verbs (this includes get, list, watch, create,
// update, patch, delete, deletecollection, and proxy)
optional Verbs verbs = 4;
// shortNames is a list of suggested short names of the resource.
repeated string shortNames = 5;
// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
repeated string categories = 7;
// The hash value of the storage version, the version this resource is
// converted to when written to the data store. Value must be treated
// as opaque by clients. Only equality comparison on the value is valid.
// This is an alpha feature and may change or be removed in the future.
// The field is populated by the apiserver only if the
// StorageVersionHash feature gate is enabled.
// This field will remain optional even if it graduates.
// +optional
optional string storageVersionHash = 10;
}
// APIResourceList is a list of APIResource, it is used to expose the name of the
// resources supported in a specific group and version, and if the resource
// is namespaced.
message APIResourceList {
// groupVersion is the group and version this APIResourceList is for.
optional string groupVersion = 1;
// resources contains the name of the resources and if they are namespaced.
repeated APIResource resources = 2;
}
// APIVersions lists the versions that are available, to allow clients to
// discover the API at /api, which is the root path of the legacy v1 API.
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message APIVersions {
// versions are the api versions that are available.
repeated string versions = 1;
// a map of client CIDR to server address that is serving this group.
// This is to help clients reach servers in the most network-efficient way possible.
// Clients can use the appropriate server address as per the CIDR that they match.
// In case of multiple matches, clients should use the longest matching CIDR.
// The server returns only those CIDRs that it thinks that the client can match.
// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
}
// ApplyOptions may be provided when applying an API object.
// FieldManager is required for apply requests.
// ApplyOptions is equivalent to PatchOptions. It is provided as a convenience with documentation
// that speaks specifically to how the options fields relate to apply.
message ApplyOptions {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// Force is going to "force" Apply requests. It means user will
// re-acquire conflicting fields owned by other people.
optional bool force = 2;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than or
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint. This
// field is required.
optional string fieldManager = 3;
}
// Condition contains details for one aspect of the current state of this API Resource.
// ---
// This struct is intended for direct use as an array at the field path .status.conditions. For example,
//
// type FooStatus struct{
// // Represents the observations of a foo's current state.
// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
// // +patchMergeKey=type
// // +patchStrategy=merge
// // +listType=map
// // +listMapKey=type
// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
//
// // other fields
// }
message Condition {
// type of condition in CamelCase or in foo.example.com/CamelCase.
// ---
// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
// useful (see .node.status.conditions), the ability to deconflict is important.
// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
// +kubebuilder:validation:MaxLength=316
optional string type = 1;
// status of the condition, one of True, False, Unknown.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Enum=True;False;Unknown
optional string status = 2;
// observedGeneration represents the .metadata.generation that the condition was set based upon.
// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
// with respect to the current state of the instance.
// +optional
// +kubebuilder:validation:Minimum=0
optional int64 observedGeneration = 3;
// lastTransitionTime is the last time the condition transitioned from one status to another.
// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Format=date-time
// --- BEGIN MANUAL EDIT ---
//optional Time lastTransitionTime = 4;
optional google.protobuf.Timestamp lastTransitionTime = 4;
// --- END MANUAL EDIT ---
// reason contains a programmatic identifier indicating the reason for the condition's last transition.
// Producers of specific condition types may define expected values and meanings for this field,
// and whether the values are considered a guaranteed API.
// The value should be a CamelCase string.
// This field may not be empty.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength=1024
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
optional string reason = 5;
// message is a human readable message indicating details about the transition.
// This may be an empty string.
// +required
// +kubebuilder:validation:Required
// +kubebuilder:validation:MaxLength=32768
optional string message = 6;
}
// CreateOptions may be provided when creating an API object.
message CreateOptions {
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than or
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint.
// +optional
optional string fieldManager = 3;
// fieldValidation instructs the server on how to handle
// objects in the request (POST/PUT/PATCH) containing unknown
// or duplicate fields, provided that the `ServerSideFieldValidation`
// feature gate is also enabled. Valid values are:
// - Ignore: This will ignore any unknown fields that are silently
// dropped from the object, and will ignore all but the last duplicate
// field that the decoder encounters. This is the default behavior
// prior to v1.23 and is the default behavior when the
// `ServerSideFieldValidation` feature gate is disabled.
// - Warn: This will send a warning via the standard warning response
// header for each unknown field that is dropped from the object, and
// for each duplicate field that is encountered. The request will
// still succeed if there are no other errors, and will only persist
// the last of any duplicate fields. This is the default when the
// `ServerSideFieldValidation` feature gate is enabled.
// - Strict: This will fail the request with a BadRequest error if
// any unknown fields would be dropped from the object, or if any
// duplicate fields are present. The error returned from the server
// will contain all unknown and duplicate fields encountered.
// +optional
optional string fieldValidation = 4;
}
// DeleteOptions may be provided when deleting an API object.
message DeleteOptions {
// The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
// +optional
optional int64 gracePeriodSeconds = 1;
// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
// returned.
// +k8s:conversion-gen=false
// +optional
optional Preconditions preconditions = 2;
// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
// Should the dependent objects be orphaned. If true/false, the "orphan"
// finalizer will be added to/removed from the object's finalizers list.
// Either this field or PropagationPolicy may be set, but not both.
// +optional
optional bool orphanDependents = 3;
// Whether and how garbage collection will be performed.
// Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
// allow the garbage collector to delete the dependents in the background;
// 'Foreground' - a cascading policy that deletes all dependents in the
// foreground.
// +optional
optional string propagationPolicy = 4;
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 5;
}
// Duration is a wrapper around time.Duration which supports correct
// marshaling to YAML and JSON. In particular, it marshals into strings, which
// can be used as map keys in json.
message Duration {
optional int64 duration = 1;
}
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
//
// Each key is either a '.' representing the field itself, and will always map to an empty set,
// or a string representing a sub-field or item. The string will follow one of these four formats:
// 'f:<name>', where <name> is the name of a field in a struct, or key in a map
// 'v:<value>', where <value> is the exact json formatted value of a list item
// 'i:<index>', where <index> is position of a item in a list
// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
// If a key maps to an empty Fields value, the field that key represents is part of the set.
//
// The exact format is defined in sigs.k8s.io/structured-merge-diff
// +protobuf.options.(gogoproto.goproto_stringer)=false
message FieldsV1 {
// Raw is the underlying serialization of this object.
optional bytes Raw = 1;
}
// GetOptions is the standard query options to the standard REST get call.
message GetOptions {
// resourceVersion sets a constraint on what resource versions a request may be served from.
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
// details.
//
// Defaults to unset
// +optional
optional string resourceVersion = 1;
}
// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying
// concepts during lookup stages without having partially valid types
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupKind {
optional string group = 1;
optional string kind = 2;
}
// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
// concepts during lookup stages without having partially valid types
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupResource {
optional string group = 1;
optional string resource = 2;
}
// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersion {
optional string group = 1;
optional string version = 2;
}
// GroupVersionForDiscovery contains the "group/version" and "version" string of a version.
// It is made a struct to keep extensibility.
message GroupVersionForDiscovery {
// groupVersion specifies the API group and version in the form "group/version"
optional string groupVersion = 1;
// version specifies the version in the form of "version". This is to save
// the clients the trouble of splitting the GroupVersion.
optional string version = 2;
}
// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion
// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersionKind {
optional string group = 1;
optional string version = 2;
optional string kind = 3;
}
// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
//
// +protobuf.options.(gogoproto.goproto_stringer)=false
message GroupVersionResource {
optional string group = 1;
optional string version = 2;
optional string resource = 3;
}
// A label selector is a label query over a set of resources. The result of matchLabels and
// matchExpressions are ANDed. An empty label selector matches all objects. A null
// label selector matches no objects.
// +structType=atomic
message LabelSelector {
// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
// map is equivalent to an element of matchExpressions, whose key field is "key", the
// operator is "In", and the values array contains only "value". The requirements are ANDed.
// +optional
map<string, string> matchLabels = 1;
// matchExpressions is a list of label selector requirements. The requirements are ANDed.
// +optional
repeated LabelSelectorRequirement matchExpressions = 2;
}
// A label selector requirement is a selector that contains values, a key, and an operator that
// relates the key and values.
message LabelSelectorRequirement {
// key is the label key that the selector applies to.
// +patchMergeKey=key
// +patchStrategy=merge
optional string key = 1;
// operator represents a key's relationship to a set of values.
// Valid operators are In, NotIn, Exists and DoesNotExist.
optional string operator = 2;
// values is an array of string values. If the operator is In or NotIn,
// the values array must be non-empty. If the operator is Exists or DoesNotExist,
// the values array must be empty. This array is replaced during a strategic
// merge patch.
// +optional
repeated string values = 3;
}
// List holds a list of objects, which may not be known by the server.
message List {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional ListMeta metadata = 1;
// List of objects
repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
}
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
message ListMeta {
// Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.
// +optional
optional string selfLink = 1;
// String that identifies the server's internal version of this object that
// can be used by clients to determine when objects have changed.
// Value must be treated as opaque by clients and passed unmodified back to the server.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
// +optional
optional string resourceVersion = 2;
// continue may be set if the user set a limit on the number of items returned, and indicates that
// the server has more data available. The value is opaque and may be used to issue another request
// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
// consistent list may not be possible if the server configuration has changed or more than a few
// minutes have passed. The resourceVersion field returned when using this continue value will be
// identical to the value in the first response, unless you have received this token from an error
// message.
optional string continue = 3;
// remainingItemCount is the number of subsequent items in the list which are not included in this
// list response. If the list request contained label or field selectors, then the number of
// remaining items is unknown and the field will be left unset and omitted during serialization.
// If the list is complete (either because it is not chunking or because this is the last chunk),
// then there are no more remaining items and this field will be left unset and omitted during
// serialization.
// Servers older than v1.15 do not set this field.
// The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
// should not rely on the remainingItemCount to be set or to be exact.
// +optional
optional int64 remainingItemCount = 4;
}
// ListOptions is the query options to a standard REST list call.
message ListOptions {
// A selector to restrict the list of returned objects by their labels.
// Defaults to everything.
// +optional
optional string labelSelector = 1;
// A selector to restrict the list of returned objects by their fields.
// Defaults to everything.
// +optional
optional string fieldSelector = 2;
// Watch for changes to the described resources and return them as a stream of
// add, update, and remove notifications. Specify resourceVersion.
// +optional
optional bool watch = 3;
// allowWatchBookmarks requests watch events with type "BOOKMARK".
// Servers that do not implement bookmarks may ignore this flag and
// bookmarks are sent at the server's discretion. Clients should not
// assume bookmarks are returned at any specific interval, nor may they
// assume the server will send any BOOKMARK event during a session.
// If this is not a watch, this field is ignored.
// +optional
optional bool allowWatchBookmarks = 9;
// resourceVersion sets a constraint on what resource versions a request may be served from.
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
// details.
//
// Defaults to unset
// +optional
optional string resourceVersion = 4;
// resourceVersionMatch determines how resourceVersion is applied to list calls.
// It is highly recommended that resourceVersionMatch be set for list calls where
// resourceVersion is set
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
// details.
//
// Defaults to unset
// +optional
optional string resourceVersionMatch = 10;
// Timeout for the list/watch call.
// This limits the duration of the call, regardless of any activity or inactivity.
// +optional
optional int64 timeoutSeconds = 5;
// limit is a maximum number of responses to return for a list call. If more items exist, the
// server will set the `continue` field on the list metadata to a value that can be used with the
// same initial query to retrieve the next set of results. Setting a limit may return fewer than
// the requested amount of items (up to zero items) in the event all requested objects are
// filtered out and clients should only use the presence of the continue field to determine whether
// more results are available. Servers may choose not to support the limit argument and will return
// all of the available results. If limit is specified and the continue field is empty, clients may
// assume that no more results are available. This field is not supported if watch is true.
//
// The server guarantees that the objects returned when using continue will be identical to issuing
// a single list call without a limit - that is, no objects created, modified, or deleted after the
// first request is issued will be included in any subsequent continued requests. This is sometimes
// referred to as a consistent snapshot, and ensures that a client that is using limit to receive
// smaller chunks of a very large result can ensure they see all possible objects. If objects are
// updated during a chunked list the version of the object that was present at the time the first list
// result was calculated is returned.
optional int64 limit = 7;
// The continue option should be set when retrieving more results from the server. Since this value is
// server defined, clients may only use the continue value from a previous query result with identical
// query parameters (except for the value of continue) and the server may reject a continue value it
// does not recognize. If the specified continue value is no longer valid whether due to expiration
// (generally five to fifteen minutes) or a configuration change on the server, the server will
// respond with a 410 ResourceExpired error together with a continue token. If the client needs a
// consistent list, it must restart their list without the continue field. Otherwise, the client may
// send another list request with the token received with the 410 error, the server will respond with
// a list starting from the next key, but from the latest snapshot, which is inconsistent from the
// previous list results - objects that are created, modified, or deleted after the first list request
// will be included in the response, as long as their keys are after the "next key".
//
// This field is not supported when watch is true. Clients may start a watch from the last
// resourceVersion value returned by the server and not miss any modifications.
optional string continue = 8;
}
// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
// that the fieldset applies to.
message ManagedFieldsEntry {
// Manager is an identifier of the workflow managing these fields.
optional string manager = 1;
// Operation is the type of operation which lead to this ManagedFieldsEntry being created.
// The only valid values for this field are 'Apply' and 'Update'.
optional string operation = 2;
// APIVersion defines the version of this resource that this field set
// applies to. The format is "group/version" just like the top-level
// APIVersion field. It is necessary to track the version of a field
// set because it cannot be automatically converted.
optional string apiVersion = 3;
// Time is the timestamp of when the ManagedFields entry was added. The
// timestamp will also be updated if a field is added, the manager
// changes any of the owned fields value or removes a field. The
// timestamp does not update when a field is removed from the entry
// because another manager took it over.
// +optional
// --- BEGIN MANUAL EDIT ---
//optional Time time = 4;
optional google.protobuf.Timestamp time = 4;
// --- END MANUAL EDIT ---
// FieldsType is the discriminator for the different fields format and version.
// There is currently only one possible value: "FieldsV1"
optional string fieldsType = 6;
// FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
// +optional
optional FieldsV1 fieldsV1 = 7;
// Subresource is the name of the subresource used to update that object, or
// empty string if the object was updated through the main resource. The
// value of this field is used to distinguish between managers, even if they
// share the same name. For example, a status update will be distinct from a
// regular update using the same manager name.
// Note that the APIVersion field is not related to the Subresource field and
// it always corresponds to the version of the main resource.
optional string subresource = 8;
}
// MicroTime is version of Time with microsecond level precision.
//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
message MicroTime {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
optional int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive. This field may be limited in precision depending on context.
optional int32 nanos = 2;
}
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
message ObjectMeta {
// Name must be unique within a namespace. Is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
// Cannot be updated.
// More info: http://kubernetes.io/docs/user-guide/identifiers#names
// +optional
optional string name = 1;
// GenerateName is an optional prefix, used by the server, to generate a unique
// name ONLY IF the Name field has not been provided.
// If this field is used, the name returned to the client will be different
// than the name passed. This value will also be combined with a unique suffix.
// The provided value has the same validation rules as the Name field,
// and may be truncated by the length of the suffix required to make the value
// unique on the server.
//
// If this field is specified and the generated name exists, the server will return a 409.
//
// Applied only if Name is not specified.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
// +optional
optional string generateName = 2;
// Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
//
// Must be a DNS_LABEL.
// Cannot be updated.
// More info: http://kubernetes.io/docs/user-guide/namespaces
// +optional
optional string namespace = 3;
// Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.
// +optional
optional string selfLink = 4;
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
//
// Populated by the system.
// Read-only.
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
// +optional
optional string uid = 5;
// An opaque value that represents the internal version of this object that can
// be used by clients to determine when objects have changed. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
// Clients must treat these values as opaque and passed unmodified back to the server.
// They may only be valid for a particular resource or set of resources.
//
// Populated by the system.
// Read-only.
// Value must be treated as opaque by clients and .
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
// +optional
optional string resourceVersion = 6;
// A sequence number representing a specific generation of the desired state.
// Populated by the system. Read-only.
// +optional
optional int64 generation = 7;
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
//
// Populated by the system.
// Read-only.
// Null for lists.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
// --- BEGIN MANUAL EDIT ---
//optional Time creationTimestamp = 8;
optional google.protobuf.Timestamp creationTimestamp = 8;
// --- END MANUAL EDIT ---
// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
// field is set by the server when a graceful deletion is requested by the user, and is not
// directly settable by a client. The resource is expected to be deleted (no longer visible
// from resource lists, and not reachable by name) after the time in this field, once the
// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
// Once the deletionTimestamp is set, this value may not be unset or be set further into the
// future, although it may be shortened or the resource may be deleted prior to this time.
// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
// remove the pod from the API. In the presence of network partitions, this object may still
// exist after this timestamp, until an administrator or automated process can determine the
// resource is fully terminated.
// If not set, graceful deletion of the object has not been requested.
//
// Populated by the system when a graceful deletion is requested.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
// --- BEGIN MANUAL EDIT ---
//optional Time deletionTimestamp = 9;
optional google.protobuf.Timestamp deletionTimestamp = 9;
// --- END MANUAL EDIT ---
// Number of seconds allowed for this object to gracefully terminate before
// it will be removed from the system. Only set when deletionTimestamp is also set.
// May only be shortened.
// Read-only.
// +optional
optional int64 deletionGracePeriodSeconds = 10;
// Map of string keys and values that can be used to organize and categorize
// (scope and select) objects. May match selectors of replication controllers
// and services.
// More info: http://kubernetes.io/docs/user-guide/labels
// +optional
map<string, string> labels = 11;
// Annotations is an unstructured key value map stored with a resource that may be
// set by external tools to store and retrieve arbitrary metadata. They are not
// queryable and should be preserved when modifying objects.
// More info: http://kubernetes.io/docs/user-guide/annotations
// +optional
map<string, string> annotations = 12;
// List of objects depended by this object. If ALL objects in the list have
// been deleted, this object will be garbage collected. If this object is managed by a controller,
// then an entry in this list will point to this controller, with the controller field set to true.
// There cannot be more than one managing controller.
// +optional
// +patchMergeKey=uid
// +patchStrategy=merge
repeated OwnerReference ownerReferences = 13;
// Must be empty before the object is deleted from the registry. Each entry
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
// Finalizers may be processed and removed in any order. Order is NOT enforced
// because it introduces significant risk of stuck finalizers.
// finalizers is a shared field, any actor with permission can reorder it.
// If the finalizer list is processed in order, then this can lead to a situation
// in which the component responsible for the first finalizer in the list is
// waiting for a signal (field value, external system, or other) produced by a
// component responsible for a finalizer later in the list, resulting in a deadlock.
// Without enforced ordering finalizers are free to order amongst themselves and
// are not vulnerable to ordering changes in the list.
// +optional
// +patchStrategy=merge
repeated string finalizers = 14;
// ManagedFields maps workflow-id and version to the set of fields
// that are managed by that workflow. This is mostly for internal
// housekeeping, and users typically shouldn't need to set or
// understand this field. A workflow can be the user's name, a
// controller's name, or the name of a specific apply path like
// "ci-cd". The set of fields is always in the version that the
// workflow used when modifying the object.
//
// +optional
repeated ManagedFieldsEntry managedFields = 17;
}
// OwnerReference contains enough information to let you identify an owning
// object. An owning object must be in the same namespace as the dependent, or
// be cluster-scoped, so there is no namespace field.
// +structType=atomic
message OwnerReference {
// NOTE: field numbers are intentionally non-sequential (apiVersion=5, kind=1,
// name=3, uid=4); they reflect wire-format history and must not be renumbered.
// API version of the referent.
optional string apiVersion = 5;
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
optional string kind = 1;
// Name of the referent.
// More info: http://kubernetes.io/docs/user-guide/identifiers#names
optional string name = 3;
// UID of the referent.
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
optional string uid = 4;
// If true, this reference points to the managing controller.
// +optional
optional bool controller = 6;
// If true, AND if the owner has the "foregroundDeletion" finalizer, then
// the owner cannot be deleted from the key-value store until this
// reference is removed.
// See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion
// for how the garbage collector interacts with this field and enforces the foreground deletion.
// Defaults to false.
// To set this field, a user needs "delete" permission of the owner,
// otherwise 422 (Unprocessable Entity) will be returned.
// +optional
optional bool blockOwnerDeletion = 7;
}
// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
// to get access to a particular ObjectMeta schema without knowing the details of the version.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message PartialObjectMetadata {
// (Metadata-only view of an object; the list counterpart is PartialObjectMetadataList.)
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional ObjectMeta metadata = 1;
}
// PartialObjectMetadataList contains a list of objects containing only their metadata
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message PartialObjectMetadataList {
// (List counterpart of PartialObjectMetadata.)
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional ListMeta metadata = 1;
// items contains each of the included items.
repeated PartialObjectMetadata items = 2;
}
// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
message Patch {
// Intentionally empty: the message exists only to give the PATCH request body
// a named type; the patch payload itself is not modeled as proto fields.
}
// PatchOptions may be provided when patching an API object.
// PatchOptions is meant to be a superset of UpdateOptions.
message PatchOptions {
// NOTE: keep the field set in sync with UpdateOptions (this message is meant
// to be a superset of it; see the message comment above).
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// Force is going to "force" Apply requests. It means user will
// re-acquire conflicting fields owned by other people. Force
// flag must be unset for non-apply patch requests.
// +optional
optional bool force = 2;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than or
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint. This
// field is required for apply requests
// (application/apply-patch) but optional for non-apply patch
// types (JsonPatch, MergePatch, StrategicMergePatch).
// +optional
optional string fieldManager = 3;
// fieldValidation instructs the server on how to handle
// objects in the request (POST/PUT/PATCH) containing unknown
// or duplicate fields, provided that the `ServerSideFieldValidation`
// feature gate is also enabled. Valid values are:
// - Ignore: This will ignore any unknown fields that are silently
// dropped from the object, and will ignore all but the last duplicate
// field that the decoder encounters. This is the default behavior
// prior to v1.23 and is the default behavior when the
// `ServerSideFieldValidation` feature gate is disabled.
// - Warn: This will send a warning via the standard warning response
// header for each unknown field that is dropped from the object, and
// for each duplicate field that is encountered. The request will
// still succeed if there are no other errors, and will only persist
// the last of any duplicate fields. This is the default when the
// `ServerSideFieldValidation` feature gate is enabled.
// - Strict: This will fail the request with a BadRequest error if
// any unknown fields would be dropped from the object, or if any
// duplicate fields are present. The error returned from the server
// will contain all unknown and duplicate fields encountered.
// +optional
optional string fieldValidation = 4;
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
message Preconditions {
// NOTE(review): both fields are optional; presumably an unset field is simply
// not checked by the server — confirm against upstream apimachinery docs.
// Specifies the target UID.
// +optional
optional string uid = 1;
// Specifies the target ResourceVersion
// +optional
optional string resourceVersion = 2;
}
// RootPaths lists the paths available at root.
// For example: "/healthz", "/apis".
message RootPaths {
// paths are the paths available at root.
// NOTE(review): presumably the response body of the API server's "/" discovery
// endpoint — confirm with callers.
repeated string paths = 1;
}
// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
message ServerAddressByClientCIDR {
// The CIDR with which clients can match their IP to figure out the server address that they should use.
optional string clientCIDR = 1;
// Address of this server, suitable for a client that matches the above CIDR.
// This can be a hostname, hostname:port, IP or IP:port.
optional string serverAddress = 2;
}
// Status is a return value for calls that don't return other objects.
message Status {
// NOTE: which attributes of `details` are populated is defined by `reason`
// (see the StatusDetails message comment below).
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional ListMeta metadata = 1;
// Status of the operation.
// One of: "Success" or "Failure".
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
optional string status = 2;
// A human-readable description of the status of this operation.
// +optional
optional string message = 3;
// A machine-readable description of why this operation is in the
// "Failure" status. If this value is empty there
// is no information available. A Reason clarifies an HTTP status
// code but does not override it.
// +optional
optional string reason = 4;
// Extended data associated with the reason. Each reason may define its
// own extended details. This field is optional and the data returned
// is not guaranteed to conform to any schema except that defined by
// the reason type.
// +optional
optional StatusDetails details = 5;
// Suggested HTTP return code for this status, 0 if not set.
// +optional
optional int32 code = 6;
}
// StatusCause provides more information about an api.Status failure, including
// cases when multiple errors are encountered.
message StatusCause {
// (Carried in StatusDetails.causes; one entry per individual error.)
// A machine-readable description of the cause of the error. If this value is
// empty there is no information available.
// +optional
optional string reason = 1;
// A human-readable description of the cause of the error. This field may be
// presented as-is to a reader.
// +optional
optional string message = 2;
// The field of the resource that has caused this error, as named by its JSON
// serialization. May include dot and postfix notation for nested attributes.
// Arrays are zero-indexed. Fields may appear more than once in an array of
// causes due to fields having multiple errors.
// Optional.
//
// Examples:
// "name" - the field "name" on the current resource
// "items[0].name" - the field "name" on the first array entry in "items"
// +optional
optional string field = 3;
}
// StatusDetails is a set of additional properties that MAY be set by the
// server to provide additional information about a response. The Reason
// field of a Status object defines what attributes will be set. Clients
// must ignore fields that do not match the defined type of each attribute,
// and should assume that any attribute may be empty, invalid, or under
// defined.
message StatusDetails {
// NOTE: field numbers are non-sequential (uid=6 follows causes=4 and
// retryAfterSeconds=5); they must not be renumbered (wire compatibility).
// The name attribute of the resource associated with the status StatusReason
// (when there is a single name which can be described).
// +optional
optional string name = 1;
// The group attribute of the resource associated with the status StatusReason.
// +optional
optional string group = 2;
// The kind attribute of the resource associated with the status StatusReason.
// On some operations may differ from the requested resource Kind.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional string kind = 3;
// UID of the resource.
// (when there is a single resource which can be described).
// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
// +optional
optional string uid = 6;
// The Causes array includes more details associated with the StatusReason
// failure. Not all StatusReasons may provide detailed causes.
// +optional
repeated StatusCause causes = 4;
// If specified, the time in seconds before the operation should be retried. Some errors may indicate
// the client must take an alternate action - for those errors this field may indicate how long to wait
// before taking the alternate action.
// +optional
optional int32 retryAfterSeconds = 5;
}
// TableOptions are used when a Table is requested by the caller.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message TableOptions {
// includeObject decides whether to include each object along with its columnar information.
// Specifying "None" will return no object, specifying "Object" will return the full object contents, and
// specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
// in version v1beta1 of the meta.k8s.io API group.
// An unset/empty value therefore behaves like "Metadata".
optional string includeObject = 1;
}
// Time is a wrapper around time.Time which supports correct
// marshaling to YAML and JSON. Wrappers are provided for many
// of the factory methods that the time package offers.
//
// +protobuf.options.marshal=false
// +protobuf.as=Timestamp
// +protobuf.options.(gogoproto.goproto_stringer)=false
message Time {
// On the wire this message is serialized as Timestamp (see +protobuf.as above);
// the field layout of the two messages is identical.
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
optional int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive. This field may be limited in precision depending on context.
optional int32 nanos = 2;
}
// Timestamp is a struct that is equivalent to Time, but intended for
// protobuf marshalling/unmarshalling. It is generated into a serialization
// that matches Time. Do not use in Go structs.
message Timestamp {
// (Field layout intentionally matches the Time message above.)
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
optional int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive. This field may be limited in precision depending on context.
optional int32 nanos = 2;
}
// TypeMeta describes an individual object in an API response or request
// with strings representing the type of the object and its API schema version.
// Structures that are versioned or persisted should inline TypeMeta.
//
// +k8s:deepcopy-gen=false
message TypeMeta {
// NOTE(review): both fields are typically populated only in serialized form,
// not by clients constructing objects — confirm with caller behavior.
// Kind is a string value representing the REST resource this object represents.
// Servers may infer this from the endpoint the client submits requests to.
// Cannot be updated.
// In CamelCase.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional string kind = 1;
// APIVersion defines the versioned schema of this representation of an object.
// Servers should convert recognized schemas to the latest internal value, and
// may reject unrecognized values.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
// +optional
optional string apiVersion = 2;
}
// UpdateOptions may be provided when updating an API object.
// All fields in UpdateOptions should also be present in PatchOptions.
message UpdateOptions {
// NOTE: every field added here must also be added to PatchOptions (see the
// message comment above).
// When present, indicates that modifications should not be
// persisted. An invalid or unrecognized dryRun directive will
// result in an error response and no further processing of the
// request. Valid values are:
// - All: all dry run stages will be processed
// +optional
repeated string dryRun = 1;
// fieldManager is a name associated with the actor or entity
// that is making these changes. The value must be less than or
// 128 characters long, and only contain printable characters,
// as defined by https://golang.org/pkg/unicode/#IsPrint.
// +optional
optional string fieldManager = 2;
// fieldValidation instructs the server on how to handle
// objects in the request (POST/PUT/PATCH) containing unknown
// or duplicate fields, provided that the `ServerSideFieldValidation`
// feature gate is also enabled. Valid values are:
// - Ignore: This will ignore any unknown fields that are silently
// dropped from the object, and will ignore all but the last duplicate
// field that the decoder encounters. This is the default behavior
// prior to v1.23 and is the default behavior when the
// `ServerSideFieldValidation` feature gate is disabled.
// - Warn: This will send a warning via the standard warning response
// header for each unknown field that is dropped from the object, and
// for each duplicate field that is encountered. The request will
// still succeed if there are no other errors, and will only persist
// the last of any duplicate fields. This is the default when the
// `ServerSideFieldValidation` feature gate is enabled.
// - Strict: This will fail the request with a BadRequest error if
// any unknown fields would be dropped from the object, or if any
// duplicate fields are present. The error returned from the server
// will contain all unknown and duplicate fields encountered.
// +optional
optional string fieldValidation = 3;
}
// Verbs masks the value so protobuf can generate
//
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
message Verbs {
// items, if empty, will result in an empty slice
// (The wrapper message, together with +protobuf.nullable=true above, lets a
// nil slice and an empty slice round-trip distinctly.)
repeated string items = 1;
}
// Event represents a single event to a watched resource.
//
// +protobuf=true
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message WatchEvent {
// Type of the event: Added, Modified, Deleted, or Error (see the Object
// field comment below for how each type affects Object).
optional string type = 1;
// Object is:
// * If Type is Added or Modified: the new state of the object.
// * If Type is Deleted: the state of the object immediately before deletion.
// * If Type is Error: *Status is recommended; other types may make sense
// depending on context.
optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
}
================================================
FILE: third_party/kubernetes_proto/runtime/BUILD.bazel
================================================
load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library")
load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
licenses(["notice"])
# Proto descriptor for the vendored k8s.io/apimachinery/pkg/runtime protos
# (see generated.proto in this package).
proto_library(
name = "runtime_proto",
srcs = ["generated.proto"],
visibility = ["//visibility:public"],
)
# Go bindings for :runtime_proto.
# NOTE(review): the go_grpc compiler is used although generated.proto declares
# no services; the plain go_proto compiler would likely suffice — confirm
# before changing, as switching compilers alters the generated package deps.
go_proto_library(
name = "runtime_go_proto",
compilers = ["@io_bazel_rules_go//proto:go_grpc"],
importpath = "github.com/googlecloudrobotics/core/third_party/kubernetes_proto/runtime",
proto = ":runtime_proto",
visibility = ["//visibility:public"],
)
# Conventional go_default_library alias embedding the generated Go code.
go_library(
name = "go_default_library",
embed = [":runtime_go_proto"],
importpath = "github.com/googlecloudrobotics/core/third_party/kubernetes_proto/runtime",
visibility = ["//visibility:public"],
)
# C++ bindings for :runtime_proto.
cc_proto_library(
name = "runtime_cc_proto",
visibility = ["//visibility:public"],
deps = [":runtime_proto"],
)
================================================
FILE: third_party/kubernetes_proto/runtime/generated.proto
================================================
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime;
// Package-wide variables from generator "generated".
option go_package = "k8s.io/apimachinery/pkg/runtime";
// RawExtension is used to hold extensions in external versions.
//
// To use this, make a field which has RawExtension as its type in your external, versioned
// struct, and Object in your internal struct. You also need to register your
// various plugin types.
//
// // Internal package:
//
// type MyAPIObject struct {
// runtime.TypeMeta `json:",inline"`
// MyPlugin runtime.Object `json:"myPlugin"`
// }
//
// type PluginA struct {
// AOption string `json:"aOption"`
// }
//
// // External package:
//
// type MyAPIObject struct {
// runtime.TypeMeta `json:",inline"`
// MyPlugin runtime.RawExtension `json:"myPlugin"`
// }
//
// type PluginA struct {
// AOption string `json:"aOption"`
// }
//
// // On the wire, the JSON will look something like this:
//
// {
// "kind":"MyAPIObject",
// "apiVersion":"v1",
// "myPlugin": {
// "kind":"PluginA",
// "aOption":"foo",
// },
// }
//
// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
// package's DefaultScheme has conversion functions installed which will unpack the
// JSON stored in RawExtension, turning it into the correct object type, and storing it
// in the Object. (TODO: In the case where the object is of an unknown type, a
// runtime.Unknown object will be created and stored.)
//
// +k8s:deepcopy-gen=true
// +protobuf=true
// +k8s:openapi-gen=true
message RawExtension {
// Raw is the underlying serialization of this object.
// (Wire-compatible with k8s.io/apimachinery/pkg/runtime.RawExtension;
// see the go_package option above.)
//
// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
optional bytes raw = 1;
}
// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
// like this:
//
// type MyAwesomeAPIObject struct {
// runtime.TypeMeta `json:",inline"`
// ... // other fields
// }
//
// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
//
// TypeMeta is provided here for convenience. You may use it directly from this package or define
// your own with the same fields.
//
// +k8s:deepcopy-gen=false
// +protobuf=true
// +k8s:openapi-gen=true
message TypeMeta {
// APIVersion defines the versioned schema of this representation of an object.
// +optional
optional string apiVersion = 1;
// Kind is a string value representing the REST resource this object represents.
// +optional
optional string kind = 2;
}
// Unknown allows api objects with unknown types to be passed-through. This can be used
// to deal with the API objects from a plug-in. Unknown objects still have functioning
// TypeMeta features-- kind, version, etc.
// TODO: Make this object have easy access to field based accessors and settors for
// metadata and field mutatation.
//
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +protobuf=true
// +k8s:openapi-gen=true
message Unknown {
// apiVersion and kind of the unrecognized object (see TypeMeta above).
optional TypeMeta typeMeta = 1;
// Raw will hold the complete serialized object which couldn't be matched
// with a registered type. Most likely, nothing should be done with this
// except for passing it through the system.
optional bytes raw = 2;
// ContentEncoding is encoding used to encode 'Raw' data.
// Unspecified means no encoding.
optional string contentEncoding = 3;
// ContentType is serialization method used to serialize 'Raw'.
// Unspecified means ContentTypeJSON.
optional string contentType = 4;
}
================================================
FILE: third_party/kubernetes_proto/schema/BUILD.bazel
================================================
load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library")
load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
licenses(["notice"])
# Proto descriptor for the vendored k8s.io/apimachinery/pkg/runtime/schema
# protos (see generated.proto in this package; it currently declares no
# messages, only the package/options).
proto_library(
name = "schema_proto",
srcs = ["generated.proto"],
visibility = ["//visibility:public"],
)
# Go bindings for :schema_proto.
# NOTE(review): uses the go_grpc compiler although the proto declares no
# services — mirrors the sibling runtime package; confirm before changing.
go_proto_library(
name = "schema_go_proto",
compilers = ["@io_bazel_rules_go//proto:go_grpc"],
importpath = "github.com/googlecloudrobotics/core/third_party/kubernetes_proto/schema",
proto = ":schema_proto",
visibility = ["//visibility:public"],
)
# Conventional go_default_library alias embedding the generated Go code.
go_library(
name = "go_default_library",
embed = [":schema_go_proto"],
importpath = "github.com/googlecloudrobotics/core/third_party/kubernetes_proto/schema",
visibility = ["//visibility:public"],
)
# C++ bindings for :schema_proto.
cc_proto_library(
name = "schema_cc_proto",
visibility = ["//visibility:public"],
deps = [":schema_proto"],
)
================================================
FILE: third_party/kubernetes_proto/schema/generated.proto
================================================
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime.schema;
// Package-wide variables from generator "generated".
option go_package = "k8s.io/apimachinery/pkg/runtime/schema";
================================================
FILE: third_party/terraform.BUILD
================================================
# Expose the `terraform` binary from this external archive to other packages.
exports_files(["terraform"])