Repository: timebertt/kubernetes-controller-sharding
Branch: main
Commit: 9494dea08d26
Files: 484
Total size: 6.2 MB
Directory structure:
gitextract_xbddqqqb/
├── .gitattributes
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug.md
│ │ └── enhancement.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release.yaml
│ ├── renovate.json5
│ └── workflows/
│ ├── e2e.yaml
│ ├── images.yaml
│ ├── release-notes.yaml
│ ├── renovate.yaml
│ └── verify.yaml
├── .gitignore
├── .golangci.yaml
├── .run/
│ ├── experiment (kind).run.xml
│ ├── shard (kind).run.xml
│ ├── sharder (kind).run.xml
│ └── webhosting-operator (kind).run.xml
├── LICENSE
├── Makefile
├── README.md
├── cmd/
│ ├── checksum-controller/
│ │ ├── main.go
│ │ └── reconciler.go
│ └── sharder/
│ ├── app/
│ │ ├── app.go
│ │ └── options.go
│ └── main.go
├── config/
│ ├── README.md
│ ├── certificate/
│ │ ├── certificate.yaml
│ │ ├── issuer.yaml
│ │ └── kustomization.yaml
│ ├── crds/
│ │ ├── kustomization.yaml
│ │ ├── namespace.yaml
│ │ └── sharding.timebertt.dev_controllerrings.yaml
│ ├── default/
│ │ └── kustomization.yaml
│ ├── monitoring/
│ │ ├── kustomization.yaml
│ │ ├── prometheus_rbac.yaml
│ │ └── servicemonitor.yaml
│ ├── rbac/
│ │ ├── kustomization.yaml
│ │ ├── leader_election.yaml
│ │ ├── metrics_auth.yaml
│ │ ├── pprof_reader.yaml
│ │ ├── role.yaml
│ │ ├── rolebinding.yaml
│ │ └── serviceaccount.yaml
│ └── sharder/
│ ├── config.yaml
│ ├── deployment.yaml
│ ├── kustomization.yaml
│ ├── poddisruptionbudget.yaml
│ └── service.yaml
├── docs/
│ ├── README.md
│ ├── design.md
│ ├── development.md
│ ├── evaluation.md
│ ├── getting-started.md
│ ├── implement-sharding.md
│ ├── installation.md
│ └── monitoring.md
├── go.mod
├── go.sum
├── go.work
├── go.work.sum
├── hack/
│ ├── boilerplate.go.txt
│ ├── ci-common.sh
│ ├── ci-e2e-kind.sh
│ ├── config/
│ │ ├── README.md
│ │ ├── cert-manager/
│ │ │ ├── kustomization.yaml
│ │ │ ├── patch-mutatingwebhook.yaml
│ │ │ ├── patch-validatingwebhook.yaml
│ │ │ └── resources/
│ │ │ ├── cluster-issuer.yaml
│ │ │ └── kustomization.yaml
│ │ ├── certificates/
│ │ │ └── host/
│ │ │ ├── config.json
│ │ │ ├── generate.sh
│ │ │ ├── kustomization.yaml
│ │ │ ├── webhook-ca-key.pem
│ │ │ ├── webhook-ca.json
│ │ │ ├── webhook-ca.pem
│ │ │ ├── webhook-server-key.pem
│ │ │ ├── webhook-server.json
│ │ │ └── webhook-server.pem
│ │ ├── checksum-controller/
│ │ │ ├── controller/
│ │ │ │ ├── deployment.yaml
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── rbac.yaml
│ │ │ │ └── serviceaccount.yaml
│ │ │ └── controllerring/
│ │ │ ├── controllerring.yaml
│ │ │ ├── kustomization.yaml
│ │ │ └── sharder_rbac.yaml
│ │ ├── external-dns/
│ │ │ ├── kustomization.yaml
│ │ │ ├── namespace.yaml
│ │ │ └── patch-deployment.yaml
│ │ ├── ingress-nginx/
│ │ │ ├── default/
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── patch_controller_resources.yaml
│ │ │ │ └── patch_default_ingress_class.yaml
│ │ │ ├── kind/
│ │ │ │ ├── kustomization.yaml
│ │ │ │ └── patch_service_nodeport.yaml
│ │ │ └── shoot/
│ │ │ ├── certificate.yaml
│ │ │ ├── kustomization.yaml
│ │ │ └── patch_service.yaml
│ │ ├── kind-config.yaml
│ │ ├── kyverno/
│ │ │ └── kustomization.yaml
│ │ ├── monitoring/
│ │ │ ├── crds/
│ │ │ │ ├── 0alertmanagerConfigCustomResourceDefinition.yaml
│ │ │ │ ├── 0alertmanagerCustomResourceDefinition.yaml
│ │ │ │ ├── 0podmonitorCustomResourceDefinition.yaml
│ │ │ │ ├── 0probeCustomResourceDefinition.yaml
│ │ │ │ ├── 0prometheusCustomResourceDefinition.yaml
│ │ │ │ ├── 0prometheusagentCustomResourceDefinition.yaml
│ │ │ │ ├── 0prometheusruleCustomResourceDefinition.yaml
│ │ │ │ ├── 0scrapeconfigCustomResourceDefinition.yaml
│ │ │ │ ├── 0servicemonitorCustomResourceDefinition.yaml
│ │ │ │ ├── 0thanosrulerCustomResourceDefinition.yaml
│ │ │ │ ├── README.md
│ │ │ │ └── kustomization.yaml
│ │ │ ├── default/
│ │ │ │ ├── dashboards/
│ │ │ │ │ ├── client-go.json
│ │ │ │ │ ├── controller-details.json
│ │ │ │ │ └── controller-runtime.json
│ │ │ │ ├── ensure-admin-password.sh
│ │ │ │ ├── grafana_ingress.yaml
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── patch_grafana_admin.yaml
│ │ │ │ ├── patch_grafana_networkpolicy.yaml
│ │ │ │ ├── patch_kubelet_metrics.yaml
│ │ │ │ ├── patch_kubestatemetrics.yaml
│ │ │ │ ├── patch_kubestatemetrics_servicemonitor.yaml
│ │ │ │ ├── patch_prometheus.yaml
│ │ │ │ └── rbac-proxy_clusterrole.yaml
│ │ │ ├── grafana-sidecar/
│ │ │ │ ├── dashboards-sidecar.yaml
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── patch_grafana_sidecar.yaml
│ │ │ │ ├── sidecar_clusterrole.yaml
│ │ │ │ └── sidecar_clusterrolebinding.yaml
│ │ │ ├── kube-prometheus/
│ │ │ │ ├── README.md
│ │ │ │ ├── blackboxExporter-clusterRole.yaml
│ │ │ │ ├── blackboxExporter-clusterRoleBinding.yaml
│ │ │ │ ├── blackboxExporter-configuration.yaml
│ │ │ │ ├── blackboxExporter-deployment.yaml
│ │ │ │ ├── blackboxExporter-networkPolicy.yaml
│ │ │ │ ├── blackboxExporter-service.yaml
│ │ │ │ ├── blackboxExporter-serviceAccount.yaml
│ │ │ │ ├── blackboxExporter-serviceMonitor.yaml
│ │ │ │ ├── grafana-config.yaml
│ │ │ │ ├── grafana-dashboardDatasources.yaml
│ │ │ │ ├── grafana-dashboardDefinitions.yaml
│ │ │ │ ├── grafana-dashboardSources.yaml
│ │ │ │ ├── grafana-deployment.yaml
│ │ │ │ ├── grafana-networkPolicy.yaml
│ │ │ │ ├── grafana-prometheusRule.yaml
│ │ │ │ ├── grafana-service.yaml
│ │ │ │ ├── grafana-serviceAccount.yaml
│ │ │ │ ├── grafana-serviceMonitor.yaml
│ │ │ │ ├── kubePrometheus-prometheusRule.yaml
│ │ │ │ ├── kubeStateMetrics-clusterRole.yaml
│ │ │ │ ├── kubeStateMetrics-clusterRoleBinding.yaml
│ │ │ │ ├── kubeStateMetrics-deployment.yaml
│ │ │ │ ├── kubeStateMetrics-networkPolicy.yaml
│ │ │ │ ├── kubeStateMetrics-prometheusRule.yaml
│ │ │ │ ├── kubeStateMetrics-service.yaml
│ │ │ │ ├── kubeStateMetrics-serviceAccount.yaml
│ │ │ │ ├── kubeStateMetrics-serviceMonitor.yaml
│ │ │ │ ├── kubernetesControlPlane-prometheusRule.yaml
│ │ │ │ ├── kubernetesControlPlane-serviceMonitorApiserver.yaml
│ │ │ │ ├── kubernetesControlPlane-serviceMonitorCoreDNS.yaml
│ │ │ │ ├── kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml
│ │ │ │ ├── kubernetesControlPlane-serviceMonitorKubeScheduler.yaml
│ │ │ │ ├── kubernetesControlPlane-serviceMonitorKubelet.yaml
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── nodeExporter-clusterRole.yaml
│ │ │ │ ├── nodeExporter-clusterRoleBinding.yaml
│ │ │ │ ├── nodeExporter-daemonset.yaml
│ │ │ │ ├── nodeExporter-networkPolicy.yaml
│ │ │ │ ├── nodeExporter-prometheusRule.yaml
│ │ │ │ ├── nodeExporter-service.yaml
│ │ │ │ ├── nodeExporter-serviceAccount.yaml
│ │ │ │ ├── nodeExporter-serviceMonitor.yaml
│ │ │ │ ├── prometheus-clusterRole.yaml
│ │ │ │ ├── prometheus-clusterRoleBinding.yaml
│ │ │ │ ├── prometheus-networkPolicy.yaml
│ │ │ │ ├── prometheus-prometheus.yaml
│ │ │ │ ├── prometheus-prometheusRule.yaml
│ │ │ │ ├── prometheus-roleBindingConfig.yaml
│ │ │ │ ├── prometheus-roleBindingSpecificNamespaces.yaml
│ │ │ │ ├── prometheus-roleConfig.yaml
│ │ │ │ ├── prometheus-roleSpecificNamespaces.yaml
│ │ │ │ ├── prometheus-service.yaml
│ │ │ │ ├── prometheus-serviceAccount.yaml
│ │ │ │ ├── prometheus-serviceMonitor.yaml
│ │ │ │ ├── prometheusAdapter-clusterRole.yaml
│ │ │ │ ├── prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml
│ │ │ │ ├── prometheusAdapter-clusterRoleBinding.yaml
│ │ │ │ ├── prometheusAdapter-clusterRoleBindingDelegator.yaml
│ │ │ │ ├── prometheusAdapter-clusterRoleServerResources.yaml
│ │ │ │ ├── prometheusAdapter-configMap.yaml
│ │ │ │ ├── prometheusAdapter-deployment.yaml
│ │ │ │ ├── prometheusAdapter-networkPolicy.yaml
│ │ │ │ ├── prometheusAdapter-podDisruptionBudget.yaml
│ │ │ │ ├── prometheusAdapter-roleBindingAuthReader.yaml
│ │ │ │ ├── prometheusAdapter-service.yaml
│ │ │ │ ├── prometheusAdapter-serviceAccount.yaml
│ │ │ │ ├── prometheusAdapter-serviceMonitor.yaml
│ │ │ │ ├── prometheusOperator-clusterRole.yaml
│ │ │ │ ├── prometheusOperator-clusterRoleBinding.yaml
│ │ │ │ ├── prometheusOperator-deployment.yaml
│ │ │ │ ├── prometheusOperator-networkPolicy.yaml
│ │ │ │ ├── prometheusOperator-prometheusRule.yaml
│ │ │ │ ├── prometheusOperator-service.yaml
│ │ │ │ ├── prometheusOperator-serviceAccount.yaml
│ │ │ │ └── prometheusOperator-serviceMonitor.yaml
│ │ │ ├── shoot/
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── patch_prometheus.yaml
│ │ │ │ └── storageclass.yaml
│ │ │ └── update.sh
│ │ ├── policy/
│ │ │ ├── ci/
│ │ │ │ ├── kustomization.yaml
│ │ │ │ └── no-requests.yaml
│ │ │ ├── controlplane/
│ │ │ │ ├── etcd-main.yaml
│ │ │ │ ├── kube-apiserver.yaml
│ │ │ │ ├── kube-controller-manager.yaml
│ │ │ │ └── kustomization.yaml
│ │ │ └── shoot/
│ │ │ ├── kustomization.yaml
│ │ │ └── sharder-scheduling.yaml
│ │ ├── profiling/
│ │ │ ├── ensure-admin-password.sh
│ │ │ ├── kustomization.yaml
│ │ │ ├── parca_config.yaml
│ │ │ ├── parca_ingress.yaml
│ │ │ ├── parca_pvc.yaml
│ │ │ ├── patch_deployment_pvc.yaml
│ │ │ └── rbac_sharder.yaml
│ │ ├── sharder/
│ │ │ ├── devel/
│ │ │ │ └── kustomization.yaml
│ │ │ └── host/
│ │ │ └── config.yaml
│ │ ├── shoot.yaml
│ │ └── skaffold.yaml
│ ├── prepare-image-metadata.sh
│ ├── test-e2e.env
│ ├── test-e2e.sh
│ ├── test-integration.env
│ ├── test-integration.sh
│ ├── test.sh
│ ├── tools.go
│ ├── tools.mk
│ ├── update-codegen.sh
│ └── vgopath-setup.sh
├── pkg/
│ ├── apis/
│ │ ├── config/
│ │ │ ├── doc.go
│ │ │ └── v1alpha1/
│ │ │ ├── defaults.go
│ │ │ ├── defaults_test.go
│ │ │ ├── doc.go
│ │ │ ├── register.go
│ │ │ ├── types.go
│ │ │ ├── v1alpha1_suite_test.go
│ │ │ ├── zz_generated.deepcopy.go
│ │ │ └── zz_generated.defaults.go
│ │ └── sharding/
│ │ ├── doc.go
│ │ └── v1alpha1/
│ │ ├── constants.go
│ │ ├── constants_test.go
│ │ ├── doc.go
│ │ ├── register.go
│ │ ├── types_controllerring.go
│ │ ├── types_controllerring_test.go
│ │ ├── v1alpha1_suite_test.go
│ │ └── zz_generated.deepcopy.go
│ ├── controller/
│ │ ├── add.go
│ │ ├── controllerring/
│ │ │ ├── add.go
│ │ │ ├── add_test.go
│ │ │ ├── controllerring_suite_test.go
│ │ │ ├── reconciler.go
│ │ │ └── reconciler_test.go
│ │ ├── sharder/
│ │ │ ├── add.go
│ │ │ ├── reconciler.go
│ │ │ ├── reconciler_test.go
│ │ │ └── sharder_suite_test.go
│ │ └── shardlease/
│ │ ├── add.go
│ │ ├── add_test.go
│ │ ├── reconciler.go
│ │ └── shardlease_suite_test.go
│ ├── metrics/
│ │ ├── add.go
│ │ ├── controllerring.go
│ │ ├── exporter/
│ │ │ └── exporter.go
│ │ ├── operations.go
│ │ └── shard.go
│ ├── shard/
│ │ ├── controller/
│ │ │ ├── builder.go
│ │ │ ├── builder_test.go
│ │ │ ├── controller_suite_test.go
│ │ │ ├── predicate.go
│ │ │ ├── predicate_test.go
│ │ │ ├── reconciler.go
│ │ │ └── reconciler_test.go
│ │ └── lease/
│ │ ├── lease.go
│ │ ├── lease_suite_test.go
│ │ └── lease_test.go
│ ├── sharding/
│ │ ├── consistenthash/
│ │ │ ├── benchmark_test.go
│ │ │ ├── consistenthash_suite_test.go
│ │ │ ├── ring.go
│ │ │ └── ring_test.go
│ │ ├── handler/
│ │ │ ├── controllerring.go
│ │ │ ├── controllerring_test.go
│ │ │ ├── handler_suite_test.go
│ │ │ ├── lease.go
│ │ │ └── lease_test.go
│ │ ├── key/
│ │ │ ├── key.go
│ │ │ ├── key_suite_test.go
│ │ │ └── key_test.go
│ │ ├── leases/
│ │ │ ├── leases_suite_test.go
│ │ │ ├── shards.go
│ │ │ ├── shards_test.go
│ │ │ ├── state.go
│ │ │ ├── state_test.go
│ │ │ ├── times.go
│ │ │ └── times_test.go
│ │ ├── predicate/
│ │ │ ├── controllerring.go
│ │ │ ├── controllerring_test.go
│ │ │ ├── lease.go
│ │ │ ├── lease_test.go
│ │ │ └── predicate_suite_test.go
│ │ └── ring/
│ │ ├── ring.go
│ │ ├── ring_suite_test.go
│ │ └── ring_test.go
│ ├── utils/
│ │ ├── client/
│ │ │ ├── client_suite_test.go
│ │ │ ├── options.go
│ │ │ ├── options_test.go
│ │ │ └── scheme.go
│ │ ├── errors/
│ │ │ ├── errors_suite_test.go
│ │ │ ├── multi.go
│ │ │ └── multi_test.go
│ │ ├── healthz/
│ │ │ ├── cache.go
│ │ │ ├── cache_test.go
│ │ │ └── healthz_suite_test.go
│ │ ├── pager/
│ │ │ ├── pager.go
│ │ │ ├── pager_suite_test.go
│ │ │ └── pager_test.go
│ │ ├── routes/
│ │ │ └── profiling.go
│ │ ├── strings.go
│ │ ├── strings_test.go
│ │ ├── test/
│ │ │ ├── envtest.go
│ │ │ ├── matchers/
│ │ │ │ ├── condition.go
│ │ │ │ ├── errors.go
│ │ │ │ ├── matchers.go
│ │ │ │ └── object.go
│ │ │ ├── object.go
│ │ │ └── paths.go
│ │ └── utils_suite_test.go
│ └── webhook/
│ ├── add.go
│ └── sharder/
│ ├── add.go
│ ├── add_test.go
│ ├── handler.go
│ ├── handler_test.go
│ ├── metrics.go
│ └── sharder_suite_test.go
├── test/
│ ├── e2e/
│ │ ├── checksum_controller_test.go
│ │ └── e2e_suite_test.go
│ └── integration/
│ ├── shard/
│ │ ├── controller/
│ │ │ ├── controller_suite_test.go
│ │ │ ├── controller_test.go
│ │ │ └── reconciler.go
│ │ └── lease/
│ │ ├── lease_suite_test.go
│ │ └── lease_test.go
│ └── sharder/
│ ├── controller/
│ │ ├── controllerring/
│ │ │ ├── controllerring_suite_test.go
│ │ │ └── controllerring_test.go
│ │ ├── sharder/
│ │ │ ├── sharder_suite_test.go
│ │ │ └── sharder_test.go
│ │ └── shardlease/
│ │ ├── shardlease_suite_test.go
│ │ └── shardlease_test.go
│ └── webhook/
│ └── sharder/
│ ├── sharder_suite_test.go
│ └── sharder_test.go
└── webhosting-operator/
├── PROJECT
├── README.md
├── cmd/
│ ├── experiment/
│ │ └── main.go
│ ├── measure/
│ │ ├── main.go
│ │ └── test.yaml
│ ├── samples-generator/
│ │ └── main.go
│ └── webhosting-operator/
│ └── main.go
├── config/
│ ├── experiment/
│ │ ├── base/
│ │ │ ├── job.yaml
│ │ │ ├── kustomization.yaml
│ │ │ ├── namespace.yaml
│ │ │ ├── prometheus_rbac.yaml
│ │ │ ├── rbac.yaml
│ │ │ ├── service.yaml
│ │ │ └── servicemonitor.yaml
│ │ ├── basic/
│ │ │ └── kustomization.yaml
│ │ ├── chaos/
│ │ │ └── kustomization.yaml
│ │ ├── rolling-update/
│ │ │ └── kustomization.yaml
│ │ └── scale-out/
│ │ └── kustomization.yaml
│ ├── manager/
│ │ ├── base/
│ │ │ ├── kustomization.yaml
│ │ │ ├── manager.yaml
│ │ │ ├── metrics_auth.yaml
│ │ │ ├── namespace.yaml
│ │ │ └── service.yaml
│ │ ├── controllerring/
│ │ │ ├── controllerring.yaml
│ │ │ ├── kustomization.yaml
│ │ │ ├── manager_patch.yaml
│ │ │ └── sharder_rbac.yaml
│ │ ├── crds/
│ │ │ ├── kustomization.yaml
│ │ │ ├── kustomizeconfig.yaml
│ │ │ ├── webhosting.timebertt.dev_themes.yaml
│ │ │ └── webhosting.timebertt.dev_websites.yaml
│ │ ├── devel/
│ │ │ └── kustomization.yaml
│ │ ├── overlays/
│ │ │ ├── debug/
│ │ │ │ ├── kustomization.yaml
│ │ │ │ └── manager_debug_patch.yaml
│ │ │ ├── default/
│ │ │ │ └── kustomization.yaml
│ │ │ ├── devel/
│ │ │ │ └── kustomization.yaml
│ │ │ ├── non-sharded/
│ │ │ │ ├── kustomization.yaml
│ │ │ │ └── manager_patch.yaml
│ │ │ ├── non-sharded-devel/
│ │ │ │ └── kustomization.yaml
│ │ │ └── shoot/
│ │ │ ├── default/
│ │ │ │ └── kustomization.yaml
│ │ │ ├── devel/
│ │ │ │ └── kustomization.yaml
│ │ │ ├── non-sharded/
│ │ │ │ └── kustomization.yaml
│ │ │ └── non-sharded-devel/
│ │ │ └── kustomization.yaml
│ │ ├── rbac/
│ │ │ ├── kustomization.yaml
│ │ │ ├── leader_election_role.yaml
│ │ │ ├── leader_election_role_binding.yaml
│ │ │ ├── parca_rbac.yaml
│ │ │ ├── role.yaml
│ │ │ ├── role_binding.yaml
│ │ │ ├── service_account.yaml
│ │ │ ├── theme_editor_role.yaml
│ │ │ ├── theme_viewer_role.yaml
│ │ │ ├── website_editor_role.yaml
│ │ │ └── website_viewer_role.yaml
│ │ └── with-dns/
│ │ ├── config.yaml
│ │ ├── kustomization.yaml
│ │ └── manager_patch.yaml
│ ├── monitoring/
│ │ ├── default/
│ │ │ ├── dashboards/
│ │ │ │ ├── experiments.json
│ │ │ │ ├── sharding.json
│ │ │ │ └── webhosting.json
│ │ │ └── kustomization.yaml
│ │ └── webhosting-operator/
│ │ ├── kustomization.yaml
│ │ ├── prometheus_rbac.yaml
│ │ ├── prometheusrule.yaml
│ │ └── servicemonitor.yaml
│ ├── policy/
│ │ ├── experiment-scheduling.yaml
│ │ ├── guaranteed-resources.yaml
│ │ ├── kustomization.yaml
│ │ ├── scale-up-worker-experiment.yaml
│ │ └── webhosting-operator-scheduling.yaml
│ └── samples/
│ ├── kustomization.yaml
│ ├── project_namespace.yaml
│ ├── theme_exciting.yaml
│ ├── theme_lame.yaml
│ ├── website_kubecon.yaml
│ ├── website_library.yaml
│ └── website_museum.yaml
├── go.mod
├── go.sum
├── pkg/
│ ├── apis/
│ │ ├── config/
│ │ │ ├── doc.go
│ │ │ └── v1alpha1/
│ │ │ ├── defaults.go
│ │ │ ├── doc.go
│ │ │ ├── register.go
│ │ │ ├── types.go
│ │ │ ├── zz_generated.deepcopy.go
│ │ │ └── zz_generated.defaults.go
│ │ └── webhosting/
│ │ ├── doc.go
│ │ └── v1alpha1/
│ │ ├── constants.go
│ │ ├── doc.go
│ │ ├── register.go
│ │ ├── types_theme.go
│ │ ├── types_website.go
│ │ └── zz_generated.deepcopy.go
│ ├── controllers/
│ │ └── webhosting/
│ │ ├── suite_test.go
│ │ ├── templates/
│ │ │ ├── index.go
│ │ │ ├── index.tmpl
│ │ │ ├── index_test.go
│ │ │ ├── internal/
│ │ │ │ └── examples.go
│ │ │ ├── nginx.conf.tmpl
│ │ │ ├── nginx.go
│ │ │ ├── nginx_test.go
│ │ │ ├── templates_suite_test.go
│ │ │ └── testserver/
│ │ │ └── server.go
│ │ └── website_controller.go
│ ├── experiment/
│ │ ├── generator/
│ │ │ ├── options.go
│ │ │ ├── project.go
│ │ │ ├── reconciler.go
│ │ │ ├── theme.go
│ │ │ ├── utils.go
│ │ │ └── website.go
│ │ ├── scenario/
│ │ │ ├── all/
│ │ │ │ └── all.go
│ │ │ ├── base/
│ │ │ │ └── base.go
│ │ │ ├── basic/
│ │ │ │ └── basic.go
│ │ │ ├── chaos/
│ │ │ │ └── chaos.go
│ │ │ ├── rolling-update/
│ │ │ │ └── rolling_update.go
│ │ │ └── scale-out/
│ │ │ └── scale_out.go
│ │ ├── scenario.go
│ │ └── tracker/
│ │ ├── tracker.go
│ │ └── website.go
│ ├── metrics/
│ │ ├── add.go
│ │ ├── theme.go
│ │ └── website.go
│ └── utils/
│ ├── kubernetes.go
│ └── utils.go
└── test/
└── e2e/
├── e2e_suite_test.go
└── webhosting_operator_test.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
docs/assets/*.jpg filter=lfs diff=lfs merge=lfs -text
================================================
FILE: .github/ISSUE_TEMPLATE/bug.md
================================================
---
name: Bug Report
about: Report a bug encountered while using this project
labels: bug
---
**What happened**:
**What you expected to happen**:
**How to reproduce it (as minimally and precisely as possible)**:
**Anything else we need to know?**:
**Environment**:
- kubernetes-controller-sharding version:
- Kubernetes version:
- Others:
================================================
FILE: .github/ISSUE_TEMPLATE/enhancement.md
================================================
---
name: Enhancement Request
about: Suggest an enhancement to this project
labels: enhancement
---
**What would you like to be added**:
**Why is this needed**:
================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
**What this PR does / why we need it**:
**Which issue(s) this PR fixes**:
Fixes #
**Special notes for your reviewer**:
================================================
FILE: .github/release.yaml
================================================
changelog:
  exclude:
    labels:
      - no-release-note
  categories:
    - title: ⚠️ Breaking Changes
      labels:
        - breaking
    - title: ✨ Features
      labels:
        - enhancement
    - title: 🐛 Bug Fixes
      labels:
        - bug
    - title: 📖 Documentation
      labels:
        - documentation
    - title: 🧹 Cleanups
      labels:
        - cleanup
    - title: 🤖 Dependencies
      labels:
        - dependencies
    - title: ℹ️ Other Changes
      labels:
        - "*"
================================================
FILE: .github/renovate.json5
================================================
{
$schema: 'https://docs.renovatebot.com/renovate-schema.json',
extends: [
'config:recommended',
':semanticCommitsDisabled',
'customManagers:githubActionsVersions',
],
labels: [
'dependencies',
],
postUpdateOptions: [
'gomodTidy',
],
automergeStrategy: 'squash',
// required for automerging patch updates
separateMinorPatch: true,
kubernetes: {
managerFilePatterns: [
'/\\.yaml$/',
],
},
customManagers: [
{
// generic detection for install manifests from GitHub releases
customType: 'regex',
managerFilePatterns: [
'/kustomization\\.yaml$/',
],
matchStrings: [
'https://github\\.com/(?<depName>.*/.*?)/releases/download/(?<currentValue>.*?)/',
],
datasourceTemplate: 'github-releases',
},
{
// generic detection for raw manifests from GitHub refs
customType: 'regex',
managerFilePatterns: [
'/kustomization\\.yaml$/',
],
matchStrings: [
'https://raw.githubusercontent.com/(?<depName>.*?/.*?)/(?<currentValue>.*?)/',
],
datasourceTemplate: 'github-releases',
},
{
// update `_VERSION` variables in Makefiles and scripts
// inspired by `regexManagers:dockerfileVersions` preset
customType: 'regex',
managerFilePatterns: [
'/Makefile$/',
'/\\.mk$/',
'/\\.sh$/',
],
matchStrings: [
'# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s.+?_VERSION *[?:]?= *"?(?<currentValue>.+?)"?\\s',
],
},
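// For reference, the manager above picks up annotated variables such as the
// following (illustrative example in Makefile syntax; the version mirrors
// KO_VERSION in .github/workflows/images.yaml):
//   # renovate: datasource=github-releases depName=ko-build/ko
//   KO_VERSION ?= v0.18.1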
{
// custom manager for updating kind node image tag and digest
customType: "regex",
managerFilePatterns: [
"/^Makefile$/",
],
matchStrings: [
"(?<depName>kindest/node):(?<currentValue>[^@]+)(?:@(?<currentDigest>[^\\s]+))?",
],
datasourceTemplate: "docker",
}
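// For reference, the manager above matches image references like the one used
// by the Makefile's kind-up target:
//   kindest/node:v1.33.7@sha256:d26ef333bdb2...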
],
packageRules: [
{
// disable update of dependency on the main module
matchPackageNames: [
'github.com/timebertt/kubernetes-controller-sharding',
],
enabled: false,
},
{
// automerge non-major updates except 0.* versions
// similar to :automergeStableNonMajor preset, but also works for versioning schemes without range support
matchUpdateTypes: [
'minor',
'patch',
],
matchCurrentVersion: '!/^v?0\\./',
automerge: true,
},
{
// automerge patch updates
matchUpdateTypes: [
'patch',
],
automerge: true,
},
{
// automerge non-major golang.org/x updates
matchDatasources: [
'go',
],
matchPackageNames: [
'golang.org/x/*',
],
matchUpdateTypes: [
'minor',
'patch',
'digest',
],
automerge: true,
},
{
// disable automerge for go minor updates
matchDatasources: [
'golang-version',
],
matchUpdateTypes: [
'minor',
],
automerge: false,
},
{
// bump k8s and controller-runtime go dependencies together
groupName: 'k8s packages',
groupSlug: 'k8s-go',
matchDatasources: [
'go',
],
matchPackageNames: [
// from "group:kubernetes"
'k8s.io/api',
'k8s.io/apiextensions-apiserver',
'k8s.io/apimachinery',
'k8s.io/apiserver',
'k8s.io/cli-runtime',
'k8s.io/client-go',
'k8s.io/cloud-provider',
'k8s.io/cluster-bootstrap',
'k8s.io/code-generator',
'k8s.io/component-base',
'k8s.io/controller-manager',
'k8s.io/cri-api',
'k8s.io/csi-translation-lib',
'k8s.io/kube-aggregator',
'k8s.io/kube-controller-manager',
'k8s.io/kube-proxy',
'k8s.io/kube-scheduler',
'k8s.io/kubectl',
'k8s.io/kubelet',
'k8s.io/legacy-cloud-providers',
'k8s.io/metrics',
'k8s.io/mount-utils',
'k8s.io/pod-security-admission',
'k8s.io/sample-apiserver',
'k8s.io/sample-cli-plugin',
'k8s.io/sample-controller',
// added packages
'sigs.k8s.io/controller-runtime',
],
},
{
// disable automerge for k8s minor updates
matchPackageNames: [
// datasource=go
'k8s.io/**', // includes more than the k8s-go group! (e.g., k8s.io/utils)
'sigs.k8s.io/controller-runtime',
// datasource=github-releases
'kubernetes/kubernetes',
'kubernetes-sigs/controller-tools',
],
matchUpdateTypes: [
'minor',
],
automerge: false,
},
{
// automerge k8s.io/utils updates
matchDatasources: [
'go',
],
matchPackageNames: [
'k8s.io/utils',
],
matchUpdateTypes: [
'digest',
],
automerge: true,
},
{
// jsonpatch major version has to be kept in sync with k8s and controller-runtime dependencies
matchDatasources: [
'go',
],
matchPackageNames: [
'gomodules.xyz/jsonpatch/*',
],
matchUpdateTypes: [
'major',
],
enabled: false,
},
{
// kind minor k8s version should be updated together with shoot k8s version
matchPackageNames: [
'kindest/node',
],
matchUpdateTypes: [
'minor',
],
enabled: false,
},
// don't add internal dependency updates to release notes
{
matchFileNames: [
'hack/config/**',
'hack/tools.mk',
],
matchPackageNames: [
'!kubernetes-sigs/controller-tools',
'!ko-build/ko',
],
addLabels: [
'no-release-note',
],
},
{
matchDatasources: [
'go',
],
matchPackageNames: [
'github.com/onsi/gomega',
'github.com/onsi/ginkgo/*',
'k8s.io/utils',
],
addLabels: [
'no-release-note',
],
},
{
// combine upgrade of manifests and image tag in one PR
groupName: 'external-dns',
matchPackageNames: [
'/external-dns/',
],
},
{
// special case for ingress-nginx: version is prefixed with `controller-`
matchDatasources: [
'github-releases',
],
matchPackageNames: [
'kubernetes/ingress-nginx',
],
versionCompatibility: '^(?<compatibility>.*)-(?<version>.+)$',
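// e.g., for a tag like "controller-v1.12.0" (version illustrative):
// compatibility = "controller", version = "v1.12.0"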
},
{
// manual action required: upgrading kube-prometheus is not fully automated yet
matchDatasources: [
'github-releases',
],
matchPackageNames: [
'prometheus-operator/kube-prometheus',
],
prHeader: '⚠️ Manual action required ⚠️\nPlease check this PR out and run `hack/config/monitoring/update.sh`.',
},
{
// kube-prometheus manifests are generated and managed by update.sh, disable renovate bumps
matchFileNames: [
'hack/config/monitoring/{crds,kube-prometheus}/**',
],
enabled: false,
},
// help renovate fetch changelogs for packages that don't have any sourceUrl metadata attached
{
matchPackageNames: [
'registry.k8s.io/prometheus-adapter/prometheus-adapter',
],
changelogUrl: 'https://github.com/kubernetes-sigs/prometheus-adapter',
},
{
matchPackageNames: [
'quay.io/brancz/kube-rbac-proxy',
],
changelogUrl: 'https://github.com/brancz/kube-rbac-proxy',
},
],
}
================================================
FILE: .github/workflows/e2e.yaml
================================================
name: e2e

on:
  push:
    branches:
      - main
    tags:
      - v*
    paths-ignore:
      - "**.md"
  pull_request:

jobs:
  e2e-kind:
    runs-on: ubuntu-latest
    env:
      ARTIFACTS: artifacts
    steps:
      - uses: actions/checkout@v5
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      - run: make ci-e2e-kind
      - uses: actions/upload-artifact@v5
        if: always()
        with:
          name: e2e-artifacts
          path: artifacts
          if-no-files-found: error
================================================
FILE: .github/workflows/images.yaml
================================================
name: images

on:
  push:
    branches:
      - main
    tags:
      - v*
  pull_request:

jobs:
  images:
    runs-on: ubuntu-latest
    env:
      # renovate: datasource=github-releases depName=ko-build/ko
      KO_VERSION: v0.18.1
    steps:
      - uses: actions/checkout@v5
        with:
          # fetch all history so that git describe works (needed by hack/prepare-image-metadata.sh)
          fetch-depth: 0
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      - uses: ko-build/setup-ko@v0.9
        with:
          version: ${{ env.KO_VERSION }}
      - name: Prepare image metadata
        id: meta
        run: ./hack/prepare-image-metadata.sh
      - name: ko build
        run: |
          set -ex

          # prepare .ko.yaml to inject build settings into all images
          entrypoints=(
            ./cmd/sharder
            ./cmd/checksum-controller
            ./webhosting-operator/cmd/experiment
            ./webhosting-operator/cmd/webhosting-operator
          )

          echo builds: > .ko.yaml
          for entrypoint in "${entrypoints[@]}" ; do
            cat >> .ko.yaml <<EOF
          - main: $entrypoint
            ldflags:
            - |
              {{.Env.LDFLAGS}}
          EOF
          done

          ko build --push=${{ github.event_name != 'pull_request' }} --sbom none --base-import-paths \
            --tags "${{ steps.meta.outputs.tags }}" --image-label "${{ steps.meta.outputs.labels }}" \
            --platform linux/amd64,linux/arm64 \
            "${entrypoints[@]}"
================================================
FILE: .github/workflows/release-notes.yaml
================================================
name: release-notes

on:
  push:
    branches:
      - main
  workflow_dispatch: {}

jobs:
  release-notes:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Draft release notes
        run: |
          set -o errexit
          set -o nounset
          set -o pipefail
          set -x

          latest_tag="$(gh release view --json tagName --jq .tagName)"
          major="$(echo "$latest_tag" | cut -d. -f1)"
          minor="$(echo "$latest_tag" | cut -d. -f2)"
          new_tag="$major.$((minor+1)).0"

          if [ "$(gh release view "$new_tag" --json isDraft --jq .isDraft)" = true ] ; then
            # clean up previous draft release
            gh release delete -y "$new_tag"
          fi

          gh release create "$new_tag" --draft --generate-notes --notes-start-tag="${latest_tag%.*}.0"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
================================================
FILE: .github/workflows/renovate.yaml
================================================
name: renovate

on:
  push:
    branches:
      - renovate/*

jobs:
  post-update:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
        with:
          token: ${{ secrets.RENOVATE_TOKEN }}
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      # prevent triggering infinite loop of this action
      - name: safety check
        id: safety
        run: |
          if git log -1 --pretty=full | grep '\[skip renovate-post-update\]' >/dev/null ; then
            echo "Skipping renovate post update workflow"
            echo "skip=true" >> $GITHUB_OUTPUT
          fi
      # Some dependency updates might require updating go.work.sum.
      # Automatically run `make tidy` on renovate branches as long as renovate doesn't know how to handle go workspaces.
      # Some dependency updates might require re-running code generation.
      # Run `make generate` and commit all changes if any.
      - run: make tidy generate
        if: steps.safety.outputs.skip != 'true'
      - uses: stefanzweifel/git-auto-commit-action@v7
        if: steps.safety.outputs.skip != 'true'
        with:
          commit_message: |
            make tidy generate

            [skip renovate-post-update]
          # commit with renovate's user, so that it doesn't block further updates to the PR
          commit_user_name: renovate[bot]
          commit_user_email: 29139614+renovate[bot]@users.noreply.github.com
          commit_author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
================================================
FILE: .github/workflows/verify.yaml
================================================
name: verify

on:
  push:
    branches:
      - main
    tags:
      - v*
  pull_request:

jobs:
  verify:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      - run: make verify
================================================
FILE: .gitignore
================================================
*.secret*
.envrc
hack/kind_kubeconfig.yaml
.gitguardian.yaml
.ko.yaml
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
testbin/*
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# editor and IDE settings
.idea
.vscode
*.swp
*.swo
*~
================================================
FILE: .golangci.yaml
================================================
version: "2"
run:
concurrency: 4
linters:
enable:
- copyloopvar
- ginkgolinter
- gocritic
- gosec
- importas
- misspell
- nilerr
- nolintlint
- prealloc
- revive
- staticcheck
- unconvert
- unparam
- whitespace
settings:
importas:
alias:
- pkg: github.com/timebertt/kubernetes-controller-sharding/apis/(\w+)/(v[\w\d]+)
alias: $1$2
- pkg: github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/apis/(\w+)/(v[\w\d]+)
alias: $1$2
- pkg: k8s.io/api/(\w+)/(v[\w\d]+)
alias: $1$2
- pkg: k8s.io/apimachinery/pkg/apis/(\w+)/(v[\w\d]+)
alias: $1$2
- pkg: k8s.io/apimachinery/pkg/api/([^m]\w+)
alias: api${1}
- pkg: k8s.io/apimachinery/pkg/util/(\w+)
alias: util${1}
- pkg: k8s.io/client-go/tools/clientcmd/api/(\w+)
alias: clientcmd${1}
- pkg: k8s.io/client-go/tools/cache
alias: toolscache
- pkg: k8s.io/component-base/config/(v[\w\d]+)
alias: componentbaseconfig$1
- pkg: k8s.io/utils/clock/testing
alias: testclock
- pkg: sigs.k8s.io/controller-runtime/pkg/client/fake
alias: fakeclient
- pkg: sigs.k8s.io/controller-runtime/pkg/log/zap
alias: logzap
- pkg: sigs.k8s.io/controller-runtime/pkg/log
alias: logf
misspell:
locale: US
nolintlint:
require-specific: true
revive:
rules:
- name: context-as-argument
- name: duplicated-imports
- name: early-return
- name: exported
- name: unreachable-code
exclusions:
generated: strict
presets:
- comments
- common-false-positives
- std-error-handling
rules:
- linters:
- staticcheck
path: pkg/utils/test
text: 'ST1001: should not use dot imports'
- linters:
- nolintlint
text: should be written without leading space
================================================
FILE: .run/experiment (kind).run.xml
================================================
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="experiment (kind)" type="GoApplicationRunConfiguration" factoryName="Go Application">
<module name="kubernetes-controller-sharding" />
<working_directory value="$PROJECT_DIR$/webhosting-operator" />
<parameters value="basic" />
<envs>
<env name="KUBECONFIG" value="$PROJECT_DIR$/hack/kind_kubeconfig.yaml" />
</envs>
<kind value="PACKAGE" />
<package value="github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/cmd/experiment" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$/webhosting-operator/cmd/experiment/main.go" />
<method v="2" />
</configuration>
</component>
================================================
FILE: .run/shard (kind).run.xml
================================================
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="checksum-controller (kind)" type="GoApplicationRunConfiguration" factoryName="Go Application">
<module name="kubernetes-controller-sharding" />
<working_directory value="$PROJECT_DIR$" />
<parameters value="--zap-devel --shard-name=checksum-controller-host --lease-namespace=default" />
<envs>
<env name="KUBECONFIG" value="$PROJECT_DIR$/hack/kind_kubeconfig.yaml" />
</envs>
<kind value="PACKAGE" />
<package value="github.com/timebertt/kubernetes-controller-sharding/cmd/checksum-controller" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$/webhosting-operator/cmd/experiment/main.go" />
<method v="2" />
</configuration>
</component>
================================================
FILE: .run/sharder (kind).run.xml
================================================
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="sharder (kind)" type="GoApplicationRunConfiguration" factoryName="Go Application">
<module name="kubernetes-controller-sharding" />
<working_directory value="$PROJECT_DIR$" />
<parameters value="--config=hack/config/sharder/host/config.yaml --zap-devel" />
<envs>
<env name="LEADER_ELECT" value="false" />
<env name="KUBECONFIG" value="$PROJECT_DIR$/hack/kind_kubeconfig.yaml" />
</envs>
<kind value="PACKAGE" />
<package value="github.com/timebertt/kubernetes-controller-sharding/cmd/sharder" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$/webhosting-operator/cmd/experiment/main.go" />
<method v="2" />
</configuration>
</component>
================================================
FILE: .run/webhosting-operator (kind).run.xml
================================================
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="webhosting-operator (kind)" type="GoApplicationRunConfiguration" factoryName="Go Application">
<module name="kubernetes-controller-sharding" />
<working_directory value="$PROJECT_DIR$/webhosting-operator" />
<envs>
<env name="ENABLE_SHARDING" value="true" />
<env name="KUBECONFIG" value="$PROJECT_DIR$/hack/kind_kubeconfig.yaml" />
</envs>
<kind value="PACKAGE" />
<package value="github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/cmd/webhosting-operator" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$/webhosting-operator/main.go" />
<method v="2" />
</configuration>
</component>
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: Makefile
================================================
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))

# Image URL to use for all building/pushing image targets
TAG ?= latest
GHCR_REPO ?= ghcr.io/timebertt/kubernetes-controller-sharding
SHARDER_IMG ?= $(GHCR_REPO)/sharder:$(TAG)
CHECKSUM_CONTROLLER_IMG ?= $(GHCR_REPO)/checksum-controller:$(TAG)
WEBHOSTING_OPERATOR_IMG ?= $(GHCR_REPO)/webhosting-operator:$(TAG)
EXPERIMENT_IMG ?= $(GHCR_REPO)/experiment:$(TAG)

# Optionally, overwrite the envtest version or assets directory to use
ENVTEST_K8S_VERSION =
KUBEBUILDER_ASSETS =

# Setting SHELL to bash allows bash commands to be executed by recipes.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

.PHONY: all
all: build

##@ General

# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php

.PHONY: help
help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

##@ Tools

include hack/tools.mk

.PHONY: clean-tools-bin
clean-tools-bin: ## Empty the tools binary directory.
	rm -rf $(TOOLS_BIN_DIR)/*

##@ Development

.PHONY: tidy
tidy: ## Runs go mod to ensure modules are up to date.
	go mod tidy
	cd webhosting-operator && go mod tidy
	@# regenerate go.work.sum
	rm -f go.work.sum
	go mod download

.PHONY: generate-fast
generate-fast: $(CONTROLLER_GEN) tidy ## Run all fast code generators for the main module.
	$(CONTROLLER_GEN) rbac:roleName=sharder crd paths="./pkg/..." output:rbac:artifacts:config=config/rbac output:crd:artifacts:config=config/crds
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./pkg/..."

.PHONY: generate-fast-webhosting
generate-fast-webhosting: $(CONTROLLER_GEN) tidy ## Run all fast code generators for the webhosting-operator module.
	$(CONTROLLER_GEN) rbac:roleName=operator crd paths="./webhosting-operator/..." output:rbac:artifacts:config=webhosting-operator/config/manager/rbac output:crd:artifacts:config=webhosting-operator/config/manager/crds
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./webhosting-operator/..."

.PHONY: generate
generate: $(VGOPATH) generate-fast generate-fast-webhosting tidy ## Run all code generators.
	hack/update-codegen.sh

.PHONY: fmt
fmt: ## Run go fmt against code.
	go fmt ./...
	cd webhosting-operator && go fmt ./...

.PHONY: test
test: ## Run unit tests.
	./hack/test.sh ./cmd/... ./pkg/... ./webhosting-operator/pkg/...

.PHONY: test-integration
test-integration: $(SETUP_ENVTEST) ## Run integration tests.
	./hack/test-integration.sh ./test/integration/...

.PHONY: test-e2e
test-e2e: $(GINKGO) ## Run e2e tests.
	./hack/test-e2e.sh $(GINKGO_FLAGS) ./test/e2e/... ./webhosting-operator/test/e2e/...

.PHONY: skaffold-fix
skaffold-fix: $(SKAFFOLD) ## Upgrade skaffold configuration to the latest apiVersion.
	$(SKAFFOLD) fix --overwrite
	[ ! -f $(SKAFFOLD_FILENAME).v2 ] || rm $(SKAFFOLD_FILENAME).v2

##@ Verification

.PHONY: lint
lint: $(GOLANGCI_LINT) ## Run golangci-lint against code.
	$(GOLANGCI_LINT) run ./... ./webhosting-operator/...

.PHONY: check
check: lint test test-integration ## Check everything (lint + test + test-integration).

.PHONY: verify-fmt
verify-fmt: fmt ## Verify go code is formatted.
	@if !(git diff --quiet HEAD); then \
	  echo "unformatted files detected, please run 'make fmt'"; exit 1; \
	fi

.PHONY: verify-generate
verify-generate: generate ## Verify generated files are up to date.
	@if !(git diff --quiet HEAD); then \
	  echo "generated files are out of date, please run 'make generate'"; exit 1; \
	fi

.PHONY: verify-tidy
verify-tidy: tidy ## Verify go module files are up to date.
	@if !(git diff --quiet HEAD -- go.work.sum go.{mod,sum} webhosting-operator/go.{mod,sum}); then \
	  echo "go module files are out of date, please run 'make tidy'"; exit 1; \
	fi

.PHONY: verify
verify: verify-tidy verify-fmt verify-generate check ## Verify everything (all verify-* rules + check).

.PHONY: ci-e2e-kind
ci-e2e-kind: $(KIND)
	./hack/ci-e2e-kind.sh

##@ Build

.PHONY: build
build: ## Build the sharder binary.
	go build -o bin/sharder ./cmd/sharder

.PHONY: run
run: $(KUBECTL) generate-fast ## Run the sharder from your host and deploy prerequisites.
	$(MAKE) deploy SKAFFOLD_MODULE=cert-manager
	$(KUBECTL) apply --server-side --force-conflicts -k config/crds
	$(KUBECTL) apply --server-side --force-conflicts -k hack/config/certificates/host
	go run ./cmd/sharder --config=hack/config/sharder/host/config.yaml --zap-devel

SHARD_NAME ?= checksum-controller-$(shell tr -dc bcdfghjklmnpqrstvwxz2456789 </dev/urandom | head -c 8)

.PHONY: run-checksum-controller
run-checksum-controller: $(KUBECTL) ## Run checksum-controller from your host and deploy prerequisites.
	$(KUBECTL) apply --server-side --force-conflicts -k hack/config/checksum-controller/controllerring
	go run ./cmd/checksum-controller --shard-name=$(SHARD_NAME) --lease-namespace=default --zap-devel

PUSH ?= false
images: export KO_DOCKER_REPO = $(GHCR_REPO)

.PHONY: images
images: $(KO) ## Build and push container images using ko.
	$(KO) build --push=$(PUSH) --sbom none --base-import-paths -t $(TAG) --platform linux/amd64,linux/arm64 \
	  ./cmd/sharder ./cmd/checksum-controller ./webhosting-operator/cmd/webhosting-operator

##@ Deployment

KIND_KUBECONFIG := $(PROJECT_DIR)/hack/kind_kubeconfig.yaml
kind-up kind-down: export KUBECONFIG = $(KIND_KUBECONFIG)

.PHONY: kind-up
kind-up: $(KIND) $(KUBECTL) ## Launch a kind cluster for local development and testing.
	$(KIND) create cluster --name sharding --config hack/config/kind-config.yaml --image kindest/node:v1.33.7@sha256:d26ef333bdb2cbe9862a0f7c3803ecc7b4303d8cea8e814b481b09949d353040
	# workaround https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files
	$(KUBECTL) get nodes -o name | cut -d/ -f2 | xargs -I {} docker exec {} sh -c "sysctl fs.inotify.max_user_instances=8192"
	# run `export KUBECONFIG=$$PWD/hack/kind_kubeconfig.yaml` to target the created kind cluster.

.PHONY: kind-down
kind-down: $(KIND) ## Tear down the kind testing cluster.
	$(KIND) delete cluster --name sharding

export SKAFFOLD_FILENAME = hack/config/skaffold.yaml
# use static label for skaffold to prevent rolling all components on every skaffold invocation
deploy up dev down: export SKAFFOLD_LABEL = skaffold.dev/run-id=sharding
# use dedicated ghcr repo for dev images to prevent spamming the "production" image repo
up dev: export SKAFFOLD_DEFAULT_REPO ?= ghcr.io/timebertt/dev-images
up dev: export SKAFFOLD_TAIL ?= true

.PHONY: deploy
deploy: $(SKAFFOLD) $(KUBECTL) $(YQ) ## Build all images and deploy everything to K8s cluster specified in $KUBECONFIG.
	$(SKAFFOLD) deploy -i $(SHARDER_IMG) -i $(CHECKSUM_CONTROLLER_IMG) -i $(WEBHOSTING_OPERATOR_IMG) -i $(EXPERIMENT_IMG)

.PHONY: up
up: $(SKAFFOLD) $(KUBECTL) $(YQ) ## Build all images, deploy everything to K8s cluster specified in $KUBECONFIG, start port-forward and tail logs.
	$(SKAFFOLD) run

.PHONY: dev
dev: $(SKAFFOLD) $(KUBECTL) $(YQ) ## Start continuous dev loop with skaffold.
	$(SKAFFOLD) dev --port-forward=user --cleanup=false --trigger=manual

.PHONY: down
down: $(SKAFFOLD) $(KUBECTL) $(YQ) ## Remove everything from K8s cluster specified in $KUBECONFIG.
	$(SKAFFOLD) delete
================================================
FILE: README.md
================================================
# Kubernetes Controller Sharding
_Horizontally Scalable Kubernetes Controllers_ 🚀
## TL;DR 📖
Make Kubernetes controllers horizontally scalable by distributing reconciliation of API objects across multiple controller instances.
Remove the limitation to have only a single active replica (leader) per controller.
See [Getting Started With Controller Sharding](docs/getting-started.md) for a quick start with this project.
I presented this project at KubeCon Europe 2025 London ([recording](https://youtu.be/OTzd9eTtLRA)) and ContainerDays 2025 Hamburg ([recording](https://youtu.be/SEy-Z00SSpM)).
Check out the recordings for a 30-minute overview with demos!
## About ℹ️
I started this project as part of my Master's studies in Computer Science at the [DHBW Center for Advanced Studies](https://www.cas.dhbw.de/) (CAS).
I completed a study project ("half-time thesis") on this topic and evolved it in my Master's thesis.
- Download and read the study project (first paper) here: [thesis-controller-sharding](https://github.com/timebertt/thesis-controller-sharding)
- Download and read the Master's thesis (second paper) here: [masters-thesis-controller-sharding](https://github.com/timebertt/masters-thesis-controller-sharding)
This repository contains the implementation belonging to the scientific work: the actual sharding implementation, a sample operator using controller sharding, a monitoring and continuous profiling setup, and some tools for development and evaluation purposes.
Since finishing the scientific work, this project has evolved further, especially in the [v0.9 release](https://github.com/timebertt/kubernetes-controller-sharding/releases/tag/v0.9.0).
Be aware that the Master's thesis might not reflect the current development state in all descriptions.
## Motivation 💡
Typically, [Kubernetes controllers](https://kubernetes.io/docs/concepts/architecture/controller/) use a leader election mechanism to determine a *single* active controller instance (leader).
When deploying multiple instances of the same controller, there will only be one active instance at any given time, while the other instances are on standby.
This is done to prevent multiple controller instances from performing uncoordinated and conflicting actions (reconciliations) on a single object concurrently.
If the current leader goes down and loses leadership (e.g., due to a network failure or a rolling update), another instance takes over leadership and becomes the active instance.
Such a setup can be described as an "active-passive HA setup". It minimizes "controller downtime" and facilitates fast fail-overs.
However, it cannot be considered "horizontal scaling", as work is not distributed among multiple instances.
This restriction imposes scalability limitations on Kubernetes controllers.
That is, the rate of reconciliations, the number of objects, etc. are limited by the size of the machine that the active controller runs on and the network bandwidth it can use.
In contrast to typical stateless applications, the throughput of the system cannot be increased by adding more instances (scaling horizontally) but only by using bigger instances (scaling vertically).
## Introduction 🚀
This project allows scaling Kubernetes controllers horizontally by removing the restriction of having only one active replica per controller (allows active-active setups).
It distributes reconciliation of Kubernetes objects across multiple controller instances, while still ensuring that only a single controller instance acts on a single object at any given time.
For this, the project applies proven sharding mechanisms used in distributed databases to Kubernetes controllers.
The project introduces a `sharder` component that implements sharding in a generic way and can be applied to any Kubernetes controller (independent of the used programming language and controller framework).
The `sharder` component is installed into the cluster along with a `ControllerRing` custom resource.
A `ControllerRing` declares a virtual ring of sharded controller instances and specifies API resources that should be distributed across shards in the ring.
It configures sharding on the cluster-scope level (i.e., objects in all namespaces), hence the `ControllerRing` name.
The watch cache is an expensive part of a controller regarding network transfer, CPU (decoding), and memory (local copy of all objects).
When running multiple instances of a controller, each instance must therefore only watch the subset of objects it is responsible for.
Otherwise, the setup would only multiply the resource consumption.
The sharder assigns objects to instances via the shard label.
Each shard then uses a label selector with its own instance name to watch only the objects that are assigned to it.
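For controller-runtime based controllers, this boils down to a small cache configuration. A minimal sketch, assuming a ring named `my-controllerring` (the `newShardedManager` helper is hypothetical; see [`cmd/checksum-controller`](cmd/checksum-controller) for the complete example):

```go
package main

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
)

// newShardedManager restricts the manager's watch cache to the objects
// that the sharder has assigned to this shard.
func newShardedManager(restConfig *rest.Config, ring, shardName string) (manager.Manager, error) {
	return manager.New(restConfig, manager.Options{
		Cache: cache.Options{
			DefaultLabelSelector: labels.SelectorFromSet(labels.Set{
				shardingv1alpha1.LabelShard(ring): shardName,
			}),
		},
	})
}
```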
Alongside the actual sharding implementation, this project contains a setup for simple [development, testing](docs/development.md), and [evaluation](docs/evaluation.md) of the sharding mechanism.
This includes an example operator that uses controller sharding ([webhosting-operator](webhosting-operator)).
See [Getting Started With Controller Sharding](docs/getting-started.md) for more details.
To support sharding in your Kubernetes controller, only three aspects need to be implemented:
- announce ring membership and shard health: maintain individual shard `Leases` instead of performing leader election on a single `Lease`
- only watch, cache, and reconcile objects assigned to the respective shard: add a shard-specific label selector to watches
- acknowledge object movements during rebalancing: remove the drain and shard label when the drain label is set and stop reconciling the object
See [Implement Sharding in Your Controller](docs/implement-sharding.md) for more information and examples.
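For controller-runtime based shards, the helpers in `pkg/shard/controller` cover the last two aspects. A condensed sketch along the lines of the example controller in this repository, with imports omitted for brevity (`ringName`, `shardName`, the inner predicate, and `Reconciler` are placeholders):

```go
func addShardedController(mgr manager.Manager, ringName, shardName string) error {
	return builder.ControllerManagedBy(mgr).
		// The sharding predicate also triggers when the drain label is set.
		For(&corev1.Secret{}, builder.WithPredicates(
			shardcontroller.Predicate(ringName, shardName, predicate.ResourceVersionChangedPredicate{}),
		)).
		// The wrapping reconciler acknowledges drain operations before
		// delegating to the actual Reconciler.
		Complete(
			shardcontroller.NewShardedReconciler(mgr).
				For(&corev1.Secret{}).
				InControllerRing(ringName).
				WithShardName(shardName).
				MustBuild(&Reconciler{}),
		)
}
```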
## Design 📐

See [Design](docs/design.md) for more details on the sharding architecture and design decisions.
## Discussion 💬
Feel free to contact me on [LinkedIn](https://www.linkedin.com/in/timebertt/) or the [Kubernetes Slack](https://kubernetes.slack.com/) ([get an invitation](https://slack.k8s.io/)): [@timebertt](https://kubernetes.slack.com/team/UF8C35Z0D).
================================================
FILE: cmd/checksum-controller/main.go
================================================
/*
Copyright 2023 Tim Ebert.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"fmt"
"os"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"go.uber.org/zap/zapcore"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
shardlease "github.com/timebertt/kubernetes-controller-sharding/pkg/shard/lease"
)
func main() {
rand.Seed(time.Now().UnixNano())
opts := newOptions()
cmd := &cobra.Command{
Use: "checksum-controller",
Short: "Run an example sharded controller",
Long: `The checksum-controller is an example for implementing the controller requirements for sharding.
For this, it creates a shard Lease object and renews it periodically.
It also starts a controller for Secrets that are assigned to the shard and handles the drain operation as expected.
See https://github.com/timebertt/kubernetes-controller-sharding/blob/main/docs/implement-sharding.md for more details.
This example sharded controller is also useful for developing the sharding components.`,
Args: cobra.NoArgs,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := opts.validate(); err != nil {
return err
}
cmd.SilenceUsage = true
return opts.run(cmd.Context())
},
}
opts.AddFlags(cmd.Flags())
if err := cmd.ExecuteContext(signals.SetupSignalHandler()); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
type options struct {
zapOptions *zap.Options
controllerRingName string
namespace string
leaseNamespace string
shardName string
}
func newOptions() *options {
return &options{
zapOptions: &zap.Options{
TimeEncoder: zapcore.ISO8601TimeEncoder,
},
controllerRingName: "checksum-controller",
namespace: metav1.NamespaceDefault,
}
}
func (o *options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.controllerRingName, "controllerring", o.controllerRingName, "Name of the ControllerRing the shard belongs to.")
fs.StringVar(&o.namespace, "namespace", o.namespace, "Namespace to watch objects in.")
fs.StringVar(&o.leaseNamespace, "lease-namespace", o.leaseNamespace, "Namespace to use for the shard lease. Defaults to the pod's namespace if running in-cluster.")
fs.StringVar(&o.shardName, "shard-name", o.shardName, "Name of the shard. Defaults to the instance's hostname.")
zapFlagSet := flag.NewFlagSet("zap", flag.ContinueOnError)
o.zapOptions.BindFlags(zapFlagSet)
fs.AddGoFlagSet(zapFlagSet)
}
func (o *options) validate() error {
if o.controllerRingName == "" {
return fmt.Errorf("--controllerring must not be empty")
}
return nil
}
func (o *options) run(ctx context.Context) error {
log := zap.New(zap.UseFlagOptions(o.zapOptions))
logf.SetLogger(log)
klog.SetLogger(log)
log.Info("Getting rest config")
restConfig, err := config.GetConfig()
if err != nil {
return fmt.Errorf("failed getting rest config: %w", err)
}
log.Info("Setting up shard lease")
shardLease, err := shardlease.NewResourceLock(restConfig, shardlease.Options{
ControllerRingName: o.controllerRingName,
LeaseNamespace: o.leaseNamespace, // optional, can be empty
ShardName: o.shardName, // optional, can be empty
})
if err != nil {
return fmt.Errorf("failed creating shard lease: %w", err)
}
log.Info("Setting up manager")
mgr, err := manager.New(restConfig, manager.Options{
Metrics: metricsserver.Options{
BindAddress: "0",
},
HealthProbeBindAddress: "0",
GracefulShutdownTimeout: ptr.To(5 * time.Second),
// SHARD LEASE
// Use manager's leader election mechanism for maintaining the shard lease.
// With this, controllers will only run as long as manager holds the shard lease.
// After graceful termination, the shard lease will be released.
LeaderElection: true,
LeaderElectionResourceLockInterface: shardLease,
LeaderElectionReleaseOnCancel: true,
// FILTERED WATCH CACHE
Cache: cache.Options{
// This controller only acts on objects in a single configured namespace.
DefaultNamespaces: map[string]cache.Config{o.namespace: {}},
// Configure cache to only watch objects that are assigned to this shard.
// This controller only watches sharded objects, so we can configure the label selector on the cache's global level.
// If your controller watches sharded objects as well as non-sharded objects, use cache.Options.ByObject to configure
// the label selector on object level.
DefaultLabelSelector: labels.SelectorFromSet(labels.Set{
shardingv1alpha1.LabelShard(o.controllerRingName): shardLease.Identity(),
}),
},
})
if err != nil {
return fmt.Errorf("failed setting up manager: %w", err)
}
log.Info("Setting up controller")
if err := (&Reconciler{}).AddToManager(mgr, o.controllerRingName, shardLease.Identity()); err != nil {
return fmt.Errorf("failed adding controller: %w", err)
}
log.Info("Starting manager")
managerDone := make(chan error, 1)
managerCtx, managerCancel := context.WithCancel(context.Background())
go func() {
managerDone <- mgr.Start(managerCtx)
}()
// Usually, SIGINT and SIGTERM trigger graceful termination immediately.
// For development purposes, we allow simulating non-graceful termination by delaying cancellation of the manager.
<-ctx.Done()
log.Info("Shutting down gracefully in 2 seconds, send another SIGINT or SIGTERM to shut down non-gracefully")
<-time.After(2 * time.Second)
// signal manager to shut down, wait for it to terminate, and propagate the error it returned
managerCancel()
return <-managerDone
}
================================================
FILE: cmd/checksum-controller/reconciler.go
================================================
/*
Copyright 2023 Tim Ebert.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"maps"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
shardcontroller "github.com/timebertt/kubernetes-controller-sharding/pkg/shard/controller"
)
// Reconciler watches Secrets and creates a ConfigMap for every Secret containing the Secret data's checksums.
// It handles the shard and drain label.
type Reconciler struct {
Client client.Client
}
// AddToManager adds Reconciler to the given manager.
func (r *Reconciler) AddToManager(mgr manager.Manager, controllerRingName, shardName string) error {
if r.Client == nil {
r.Client = mgr.GetClient()
}
// ACKNOWLEDGE DRAIN OPERATIONS
// Use the shardcontroller package as helpers for:
// - a predicate that triggers when the drain label is present (even if the actual predicates don't trigger)
	// - wrapping the actual reconciler in a reconciler that handles the drain operation for us
return builder.ControllerManagedBy(mgr).
Named("secret-checksums").
For(&corev1.Secret{}, builder.WithPredicates(shardcontroller.Predicate(controllerRingName, shardName, SecretDataChanged()))).
Owns(&corev1.ConfigMap{}, builder.WithPredicates(ObjectDeleted())).
WithOptions(controller.Options{
MaxConcurrentReconciles: 5,
}).
Complete(
shardcontroller.NewShardedReconciler(mgr).
For(&corev1.Secret{}).
InControllerRing(controllerRingName).
WithShardName(shardName).
MustBuild(r),
)
}
// SecretDataChanged returns a predicate that is similar to predicate.GenerationChangedPredicate but for Secrets
// that don't have a metadata.generation field.
func SecretDataChanged() predicate.Predicate {
return predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
			return !apiequality.Semantic.DeepEqual(e.ObjectOld.(*corev1.Secret).Data, e.ObjectNew.(*corev1.Secret).Data)
},
}
}
// ObjectDeleted returns a predicate that only triggers for DELETE events.
func ObjectDeleted() predicate.Predicate {
return predicate.Funcs{
CreateFunc: func(_ event.CreateEvent) bool { return false },
UpdateFunc: func(_ event.UpdateEvent) bool { return false },
DeleteFunc: func(_ event.DeleteEvent) bool { return true },
GenericFunc: func(_ event.GenericEvent) bool { return false },
}
}
// Reconcile reconciles a ConfigMap.
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := logf.FromContext(ctx)
secret := &corev1.Secret{}
if err := r.Client.Get(ctx, req.NamespacedName, secret); err != nil {
if apierrors.IsNotFound(err) {
log.V(1).Info("Object is gone, stop reconciling")
return reconcile.Result{}, nil
}
return reconcile.Result{}, fmt.Errorf("error retrieving object from store: %w", err)
}
// Perform a typical operation in this example controller.
// Create a ConfigMap with a controller reference to the watched Secret.
log.V(1).Info("Reconciling object")
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "checksums-" + secret.Name,
Namespace: secret.Namespace,
Labels: maps.Clone(secret.Labels),
},
Data: make(map[string]string, len(secret.Data)),
}
	// Guard against nil labels: maps.Clone returns nil if the Secret has no labels.
	if configMap.Labels == nil {
		configMap.Labels = make(map[string]string, 1)
	}
	configMap.Labels["secret"] = secret.Name
// Calculate the checksum for every Secret key and populate it in the ConfigMap.
for key, data := range secret.Data {
checksum := sha256.Sum256(data)
configMap.Data[key] = hex.EncodeToString(checksum[:])
}
if err := controllerutil.SetControllerReference(secret, configMap, r.Client.Scheme()); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, client.IgnoreAlreadyExists(r.Client.Create(ctx, configMap))
}
================================================
FILE: cmd/sharder/app/app.go
================================================
/*
Copyright 2023 Tim Ebert.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/healthz"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/timebertt/kubernetes-controller-sharding/pkg/controller"
shardingmetrics "github.com/timebertt/kubernetes-controller-sharding/pkg/metrics"
healthzutils "github.com/timebertt/kubernetes-controller-sharding/pkg/utils/healthz"
"github.com/timebertt/kubernetes-controller-sharding/pkg/webhook"
)
// Name is a const for the name of this component.
const Name = "sharder"
// NewCommand creates a new cobra.Command for running sharder.
func NewCommand() *cobra.Command {
opts := newOptions()
cmd := &cobra.Command{
Use: Name,
Short: "Launch the " + Name,
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
if err := opts.complete(); err != nil {
return err
}
log := zap.New(zap.UseFlagOptions(opts.zapOptions))
logf.SetLogger(log)
klog.SetLogger(log)
log.Info("Starting "+Name, "version", version.Get())
cmd.Flags().VisitAll(func(flag *pflag.Flag) {
log.Info(fmt.Sprintf("FLAG: --%s=%s", flag.Name, flag.Value))
})
// don't output usage on further errors raised during execution
cmd.SilenceUsage = true
// further errors will be logged properly, don't duplicate
cmd.SilenceErrors = true
return run(cmd.Context(), log, opts)
},
}
flags := cmd.Flags()
verflag.AddFlags(flags)
opts.addFlags(flags)
return cmd
}
func run(ctx context.Context, log logr.Logger, opts *options) error {
log.Info("Setting up manager")
mgr, err := manager.New(opts.restConfig, opts.managerOptions)
if err != nil {
return err
}
log.Info("Setting up health check endpoints")
if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
return err
}
if err := mgr.AddReadyzCheck("cache-sync", healthzutils.CacheSync(mgr.GetCache())); err != nil {
return err
}
log.Info("Adding controllers to manager")
if err := controller.AddToManager(ctx, mgr, opts.config); err != nil {
return fmt.Errorf("failed adding controllers to manager: %w", err)
}
log.Info("Adding webhooks to manager")
if err := webhook.AddToManager(ctx, mgr, opts.config); err != nil {
return fmt.Errorf("failed adding webhooks to manager: %w", err)
}
log.Info("Adding metrics to manager")
if err = shardingmetrics.AddToManager(mgr); err != nil {
return fmt.Errorf("failed adding metrics to manager: %w", err)
}
log.Info("Starting manager")
return mgr.Start(ctx)
}
================================================
FILE: cmd/sharder/app/options.go
================================================
/*
Copyright 2023 Tim Ebert.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"flag"
"fmt"
"net/http"
"os"
goruntime "runtime"
"strconv"
"github.com/spf13/pflag"
"go.uber.org/zap/zapcore"
coordinationv1 "k8s.io/api/coordination/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/selection"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/rest"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
configv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/config/v1alpha1"
shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
utilclient "github.com/timebertt/kubernetes-controller-sharding/pkg/utils/client"
"github.com/timebertt/kubernetes-controller-sharding/pkg/utils/routes"
)
var scheme = utilclient.SharderScheme
type options struct {
configFile string
zapOptions *zap.Options
config *configv1alpha1.SharderConfig
restConfig *rest.Config
managerOptions manager.Options
}
func newOptions() *options {
return &options{
zapOptions: &zap.Options{
TimeEncoder: zapcore.ISO8601TimeEncoder,
},
}
}
func (o *options) addFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.configFile, "config", o.configFile, "Path to configuration file.")
zapFlagSet := flag.NewFlagSet("zap", flag.ContinueOnError)
o.zapOptions.BindFlags(zapFlagSet)
fs.AddGoFlagSet(zapFlagSet)
}
func (o *options) complete() error {
o.config = &configv1alpha1.SharderConfig{}
// load config file if specified
if o.configFile != "" {
data, err := os.ReadFile(o.configFile)
if err != nil {
return fmt.Errorf("error reading config file: %w", err)
}
if err = runtime.DecodeInto(serializer.NewCodecFactory(scheme).UniversalDecoder(), data, o.config); err != nil {
return fmt.Errorf("error decoding config: %w", err)
}
} else {
scheme.Default(o.config)
}
// load rest config
var err error
o.restConfig, err = config.GetConfig()
if err != nil {
return fmt.Errorf("error loading kubeconfig: %w", err)
}
o.applyConfigToRESTConfig()
// bring everything together
o.managerOptions = manager.Options{
Scheme: scheme,
// allows us to quickly handover leadership on restarts
LeaderElectionReleaseOnCancel: true,
}
o.applyConfigToManagerOptions()
o.applyCacheOptions()
if err := o.applyOptionsOverrides(); err != nil {
return err
}
return nil
}
func (o *options) applyConfigToRESTConfig() {
if clientConnection := o.config.ClientConnection; clientConnection != nil {
if clientConnection.QPS > 0 {
o.restConfig.QPS = clientConnection.QPS
}
if clientConnection.Burst > 0 {
o.restConfig.Burst = int(clientConnection.Burst)
}
}
}
func (o *options) applyConfigToManagerOptions() {
if leaderElection := o.config.LeaderElection; leaderElection != nil {
o.managerOptions.LeaderElection = *leaderElection.LeaderElect
o.managerOptions.LeaderElectionResourceLock = leaderElection.ResourceLock
o.managerOptions.LeaderElectionID = leaderElection.ResourceName
o.managerOptions.LeaderElectionNamespace = leaderElection.ResourceNamespace
o.managerOptions.LeaseDuration = ptr.To(leaderElection.LeaseDuration.Duration)
o.managerOptions.RenewDeadline = ptr.To(leaderElection.RenewDeadline.Duration)
o.managerOptions.RetryPeriod = ptr.To(leaderElection.RetryPeriod.Duration)
}
o.managerOptions.HealthProbeBindAddress = o.config.Health.BindAddress
if o.config.Metrics.BindAddress != "0" {
var extraHandlers map[string]http.Handler
if *o.config.Debugging.EnableProfiling {
extraHandlers = routes.ProfilingHandlers
if *o.config.Debugging.EnableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
}
o.managerOptions.Metrics = metricsserver.Options{
SecureServing: true,
BindAddress: o.config.Metrics.BindAddress,
FilterProvider: filters.WithAuthenticationAndAuthorization,
ExtraHandlers: extraHandlers,
}
}
webhookOptions := webhook.Options{}
if serverConfig := o.config.Webhook.Server; serverConfig != nil {
webhookOptions.CertDir = ptr.Deref(serverConfig.CertDir, "")
webhookOptions.CertName = ptr.Deref(serverConfig.CertName, "")
webhookOptions.KeyName = ptr.Deref(serverConfig.KeyName, "")
}
o.managerOptions.WebhookServer = webhook.NewServer(webhookOptions)
o.managerOptions.GracefulShutdownTimeout = ptr.To(o.config.GracefulShutdownTimeout.Duration)
}
func (o *options) applyCacheOptions() {
// filter lease cache for shard leases to avoid watching all leases in cluster
leaseSelector := labels.NewSelector()
{
ringRequirement, err := labels.NewRequirement(shardingv1alpha1.LabelControllerRing, selection.Exists, nil)
utilruntime.Must(err)
		leaseSelector = leaseSelector.Add(*ringRequirement)
}
o.managerOptions.Cache = cache.Options{
DefaultTransform: dropUnwantedMetadata,
ByObject: map[client.Object]cache.ByObject{
&coordinationv1.Lease{}: {
Label: leaseSelector,
},
},
}
}
func (o *options) applyOptionsOverrides() error {
var err error
// allow overriding leader election via env var for debugging purposes
if leaderElectEnv, ok := os.LookupEnv("LEADER_ELECT"); ok {
o.managerOptions.LeaderElection, err = strconv.ParseBool(leaderElectEnv)
if err != nil {
return fmt.Errorf("error parsing LEADER_ELECT env var: %w", err)
}
}
return nil
}
func dropUnwantedMetadata(i interface{}) (interface{}, error) {
obj, ok := i.(client.Object)
if !ok {
return i, nil
}
obj.SetManagedFields(nil)
annotations := obj.GetAnnotations()
delete(annotations, "kubectl.kubernetes.io/last-applied-configuration")
obj.SetAnnotations(annotations)
return obj, nil
}
================================================
FILE: cmd/sharder/main.go
================================================
/*
Copyright 2023 Tim Ebert.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"github.com/timebertt/kubernetes-controller-sharding/cmd/sharder/app"
)
func main() {
if err := app.NewCommand().ExecuteContext(signals.SetupSignalHandler()); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
================================================
FILE: config/README.md
================================================
# config
This directory hosts manifests for deploying the sharding components.
Manifests of components for the development setup should be hosted in [`hack/config`](../hack/config) instead.
I.e., this directory should only contain manifests that are useful for others wanting to reuse the sharding components in their setup.
================================================
FILE: config/certificate/certificate.yaml
================================================
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: webhook-server
spec:
issuerRef:
name: selfsigned
commonName: sharding:sharder:webhook
dnsNames:
- sharder.sharding-system
- sharder.sharding-system.svc
- sharder.sharding-system.svc.cluster.local
secretName: webhook-server
================================================
FILE: config/certificate/issuer.yaml
================================================
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned
spec:
selfSigned: {}
================================================
FILE: config/certificate/kustomization.yaml
================================================
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
namespace: sharding-system
labels:
- includeSelectors: true
pairs:
app.kubernetes.io/name: controller-sharding
resources:
- certificate.yaml
- issuer.yaml
patches:
- patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: sharder
namespace: sharding-system
spec:
template:
spec:
containers:
- name: sharder
volumeMounts:
- name: cert
mountPath: /tmp/k8s-webhook-server/serving-certs
volumes:
- name: cert
secret:
secretName: webhook-server
================================================
FILE: config/crds/kustomization.yaml
================================================
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: true
pairs:
app.kubernetes.io/name: controller-sharding
resources:
- namespace.yaml
- sharding.timebertt.dev_controllerrings.yaml
================================================
FILE: config/crds/namespace.yaml
================================================
apiVersion: v1
kind: Namespace
metadata:
name: sharding-system
================================================
FILE: config/crds/sharding.timebertt.dev_controllerrings.yaml
================================================
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.19.0
name: controllerrings.sharding.timebertt.dev
spec:
group: sharding.timebertt.dev
names:
kind: ControllerRing
listKind: ControllerRingList
plural: controllerrings
singular: controllerring
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.conditions[?(@.type == "Ready")].status
name: Ready
type: string
- jsonPath: .status.availableShards
name: Available
type: string
- jsonPath: .status.shards
name: Shards
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: |-
ControllerRing declares a virtual ring of sharded controller instances. Objects of the specified resources are
distributed across shards of this ring. Objects in all namespaces are considered unless a namespaceSelector is
specified.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec contains the specification of the desired behavior of
the ControllerRing.
properties:
namespaceSelector:
description: |-
NamespaceSelector overwrites the webhook configs' namespaceSelector.
If set, this selector should exclude the kube-system and sharding-system namespaces.
If omitted, the default namespaceSelector from the SharderConfig is used.
Note: changing/unsetting this selector will not remove labels from objects in namespaces that were previously
included.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
resources:
description: Resources specifies the list of resources that are distributed
across shards in this ControllerRing.
items:
description: RingResource specifies a resource along with controlled
resources that is distributed across shards in a ring.
properties:
controlledResources:
description: |-
ControlledResources are additional resources that are distributed across shards in the ControllerRing.
These resources are controlled by the controller's main resource, i.e., they have an owner reference with
controller=true back to the GroupResource of this RingResource.
Typically, the controller also watches objects of this resource and enqueues the owning object (of the main
resource) whenever the status of a controlled object changes.
items:
description: |-
GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
concepts during lookup stages without having partially valid types
properties:
group:
type: string
resource:
type: string
required:
- group
- resource
type: object
type: array
x-kubernetes-list-map-keys:
- group
- resource
x-kubernetes-list-type: map
group:
type: string
resource:
type: string
required:
- group
- resource
type: object
type: array
x-kubernetes-list-map-keys:
- group
- resource
x-kubernetes-list-type: map
type: object
status:
description: Status contains the most recently observed status of the
ControllerRing.
properties:
availableShards:
description: AvailableShards is the total number of available shards
of this ring.
format: int32
type: integer
conditions:
description: |-
Conditions represents the observations of a foo's current state.
Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
observedGeneration:
description: The generation observed by the ControllerRing controller.
format: int64
type: integer
shards:
description: Shards is the total number of shards of this ring.
format: int32
type: integer
required:
- availableShards
- shards
type: object
type: object
x-kubernetes-validations:
- message: ControllerRing name must not be longer than 63 characters
rule: size(self.metadata.name) <= 63
served: true
storage: true
subresources:
status: {}
================================================
FILE: config/default/kustomization.yaml
================================================
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../crds
- ../sharder
components:
- ../certificate
================================================
FILE: config/monitoring/kustomization.yaml
================================================
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: sharding-system
resources:
- servicemonitor.yaml
# provide prometheus running in namespace "monitoring" with the permissions required for service discovery in namespace
# "sharding-system"
- prometheus_rbac.yaml
================================================
FILE: config/monitoring/prometheus_rbac.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: prometheus
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
name: prometheus-k8s-service-discovery
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: prometheus
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
name: prometheus-k8s-service-discovery
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s-service-discovery
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring
================================================
FILE: config/monitoring/servicemonitor.yaml
================================================
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: sharder
labels:
app.kubernetes.io/name: controller-sharding
app.kubernetes.io/component: sharder
spec:
jobLabel: app.kubernetes.io/component
endpoints:
- path: /metrics
port: metrics
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 10s
scrapeTimeout: 10s
tlsConfig:
insecureSkipVerify: true
relabelings:
- action: labelmap
regex: "__meta_kubernetes_pod_label_label_prometheus_io_(.*)"
replacement: "${1}"
selector:
matchLabels:
app.kubernetes.io/name: controller-sharding
app.kubernetes.io/component: sharder
================================================
FILE: config/rbac/kustomization.yaml
================================================
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- serviceaccount.yaml
- leader_election.yaml
- metrics_auth.yaml
- role.yaml
- rolebinding.yaml
- pprof_reader.yaml
patches:
# This is a workaround for controller-gen not being able to handle colons in the role name option.
- target:
kind: ClusterRole
name: sharder
patch: |
- op: replace
path: /metadata/name
value: sharding:sharder
================================================
FILE: config/rbac/leader_election.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: sharding:sharder:leader-election
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- create
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: sharding:sharder:leader-election
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: sharding:sharder:leader-election
subjects:
- kind: ServiceAccount
name: sharder
namespace: sharding-system
================================================
FILE: config/rbac/metrics_auth.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: sharding:metrics-auth
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: sharding:metrics-auth
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: sharding:metrics-auth
subjects:
- kind: ServiceAccount
name: sharder
namespace: sharding-system
================================================
FILE: config/rbac/pprof_reader.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: sharding:sharder:pprof-reader
rules:
- nonResourceURLs:
- "/debug/pprof/allocs"
- "/debug/pprof/block"
- "/debug/pprof/goroutine"
- "/debug/pprof/heap"
- "/debug/pprof/mutex"
- "/debug/pprof/profile"
- "/debug/pprof/symbol"
- "/debug/pprof/threadcreate"
- "/debug/pprof/trace"
verbs:
- get
================================================
FILE: config/rbac/role.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: sharder
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- create
- patch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- sharding.timebertt.dev
resources:
- controllerrings
verbs:
- get
- list
- watch
- apiGroups:
- sharding.timebertt.dev
resources:
- controllerrings/status
verbs:
- patch
- update
================================================
FILE: config/rbac/rolebinding.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: sharding:sharder
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: sharding:sharder
subjects:
- kind: ServiceAccount
name: sharder
namespace: sharding-system
================================================
FILE: config/rbac/serviceaccount.yaml
================================================
apiVersion: v1
kind: ServiceAccount
metadata:
name: sharder
automountServiceAccountToken: false
================================================
FILE: config/sharder/config.yaml
================================================
apiVersion: config.sharding.timebertt.dev/v1alpha1
kind: SharderConfig
webhook:
config:
annotations:
# Technically, this belongs to the certificate component. It doesn't hurt to add this by default though.
# Kustomize doesn't allow merging config files in ConfigMaps. Hence, keep the full default config here.
cert-manager.io/inject-ca-from: sharding-system/webhook-server
================================================
FILE: config/sharder/deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
name: sharder
namespace: sharding-system
labels:
app.kubernetes.io/component: sharder
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/component: sharder
template:
metadata:
labels:
app.kubernetes.io/component: sharder
spec:
automountServiceAccountToken: true
securityContext:
runAsNonRoot: true
containers:
- name: sharder
image: sharder:latest
args:
- --config=/config.yaml
volumeMounts:
- name: config
mountPath: /config.yaml
subPath: config
env:
- name: DISABLE_HTTP2
value: "true"
ports:
- name: webhook
containerPort: 9443
protocol: TCP
- name: metrics
containerPort: 8080
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 200m
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
volumes:
- name: config
configMap:
name: sharder-config
serviceAccountName: sharder
terminationGracePeriodSeconds: 30
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/component: sharder
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/component: sharder
================================================
FILE: config/sharder/kustomization.yaml
================================================
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: sharding-system
generatorOptions:
disableNameSuffixHash: true
labels:
- includeSelectors: true
pairs:
app.kubernetes.io/name: controller-sharding
images:
- name: sharder
newName: ghcr.io/timebertt/kubernetes-controller-sharding/sharder
newTag: latest
resources:
- deployment.yaml
- poddisruptionbudget.yaml
- service.yaml
- ../rbac
configMapGenerator:
- name: sharder-config
options:
labels:
app.kubernetes.io/component: sharder
files:
- config=../sharder/config.yaml
================================================
FILE: config/sharder/poddisruptionbudget.yaml
================================================
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
labels:
app.kubernetes.io/component: sharder
name: sharder
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/component: sharder
================================================
FILE: config/sharder/service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
name: sharder
namespace: sharding-system
labels:
app.kubernetes.io/component: sharder
spec:
type: ClusterIP
selector:
app.kubernetes.io/component: sharder
ports:
- port: 443
name: webhook
protocol: TCP
targetPort: webhook
- port: 8080
name: metrics
protocol: TCP
targetPort: metrics
================================================
FILE: docs/README.md
================================================
# Documentation Index
- [Getting Started With Controller Sharding](getting-started.md) ⬅️ start here, if you're new to the project
- [Install the Sharding Components](installation.md)
- [Implement Sharding in Your Controller](implement-sharding.md)
- [Monitoring the Sharding Components](monitoring.md)
- [Design](design.md)
- [Evaluating the Sharding Mechanism](evaluation.md)
- [Development and Testing Setup](development.md)
================================================
FILE: docs/design.md
================================================
# Design
This document explains the sharding design in more detail.
Please also consider reading the respective design chapters in the [study project](https://github.com/timebertt/thesis-controller-sharding) and [Master's thesis](https://github.com/timebertt/masters-thesis-controller-sharding) for aspects that this document doesn't cover in enough detail.
## Architecture
This section outlines the key components and mechanisms involved in achieving controller sharding.

### The Sharder Component
The sharder is a central component deployed once per cluster.
It serves as the overall orchestrator of the sharding mechanism.
It facilitates membership and failure detection, partitioning, object assignment, and prevention of concurrent reconciliations.
The component is designed to be generic, i.e., it can be used for implementing sharding for any kind of controller (independent of the used programming language and controller framework).
### Shard Leases
Multiple instances of the actual controller are deployed.
Notably, no leader election is performed, and there is no designated single active instance.
Instead, each controller instance maintains an individual shard `Lease` labeled with the ring's name, allowing them to announce themselves to the sharder for membership and failure detection.
The sharder watches these leases to build a hash ring with the available instances.
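For controller-runtime based shards, the `pkg/shard/lease` package in this repository plugs the shard lease into the manager's leader-election machinery. A minimal sketch, assuming a ring named `my-controllerring` (the `setupShard` helper is hypothetical):

```go
package main

import (
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	shardlease "github.com/timebertt/kubernetes-controller-sharding/pkg/shard/lease"
)

// setupShard maintains an individual shard lease instead of performing
// leader election: controllers only run while the shard lease is held,
// and the lease is released on graceful shutdown.
func setupShard(restConfig *rest.Config) (manager.Manager, error) {
	shardLease, err := shardlease.NewResourceLock(restConfig, shardlease.Options{
		ControllerRingName: "my-controllerring",
	})
	if err != nil {
		return nil, err
	}

	return manager.New(restConfig, manager.Options{
		LeaderElection:                      true,
		LeaderElectionResourceLockInterface: shardLease,
		LeaderElectionReleaseOnCancel:       true,
	})
}
```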
### The `ControllerRing` Resource and Sharder Webhook
Rings of controllers are configured through the use of the `ControllerRing` custom resource.
The sharder creates a `MutatingWebhookConfiguration` for each `ControllerRing` to perform assignments for objects associated with the ring.
The sharder webhook is called on `CREATE` and `UPDATE` requests for configured resources, but only for objects that don't have the ring-specific shard label, i.e., for unassigned objects.
The sharder uses the consistent hashing ring to determine the desired shard and adds the shard label during admission accordingly.
Shards then use a label selector for the shard label with their own instance name to restrict the cache and controller to the subset of objects assigned to them.
For the controller's "main" object (configured in `ControllerRing.spec.resources[]`), the object's API group, `kind`, `namespace`, and `name` are concatenated to form its hash key.
For objects controlled by other objects (configured in `ControllerRing.spec.resources[].controlledResources[]`), the sharder utilizes information about the controlling object (`ownerReference` with `controller=true`) to calculate the object's hash key.
This ensures that owned objects are consistently assigned to the same shard as their owner.
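The exact key format is an implementation detail of the sharder, but the idea can be sketched as follows (a hypothetical helper, not the actual implementation):

```go
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// hashKey sketches how an object's position on the consistent hash ring is
// derived. For controlled objects, the identity of the controlling owner is
// used instead, so owned objects land on the same shard as their owner.
func hashKey(apiGroup, kind, namespace, name string, controllerRef *metav1.OwnerReference) string {
	if controllerRef != nil {
		ownerGV, _ := schema.ParseGroupVersion(controllerRef.APIVersion)
		return ownerGV.Group + "/" + controllerRef.Kind + "/" + namespace + "/" + controllerRef.Name
	}
	return apiGroup + "/" + kind + "/" + namespace + "/" + name
}
```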
### Object Movements and Rebalancing
The sharder also runs a controller that facilitates object movements when necessary.
For this, it watches the shard leases and ensures all object assignments are up-to-date whenever the set of available instances changes.
It also performs periodic syncs to cater for objects that failed to be assigned during admission.
When a shard voluntarily releases its lease (i.e., on graceful shutdown), the sharder recognizes that the shard was removed from the ring and sets its state to `dead`.
With this, the shard is no longer considered for object assignments.
The orphaned `Lease` is cleaned up after 1 minute.
The sharder immediately moves objects that were assigned to the removed shard to the remaining available shards.
For this, the controller simply removes the shard label on all affected objects and lets the webhook reassign them.
As the original shard is not available anymore, moving the objects doesn't need to be coordinated and the sharder can immediately move objects.
When a shard fails to renew its lease in time, the sharder tries to acquire the shard lease itself to verify that the API server is reachable and functional before declaring the shard unavailable.
If this is successful, the shard is considered `dead`, which leads to forcefully reassigning its objects.
When a new shard is added to the ring, the sharder recognizes the available shard lease and performs rebalancing accordingly.
In contrast to moving objects from unavailable shards, this needs to be coordinated to prevent multiple shards from acting on the same object concurrently.
Otherwise, the shards might perform conflicting actions which might lead to a broken state of the objects.
During rebalancing, the sharder drains objects from the old shard by adding the drain label.
This operation is acknowledged by the old shard by removing both the shard and the drain label.
This in turn triggers the sharder webhook again, which assigns the object to the new shard.
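In Go, the shard-side acknowledgement can be sketched as follows (a hand-rolled sketch with the ring-specific label keys passed in; controller-runtime based shards can use the `pkg/shard/controller` helpers instead):

```go
import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// acknowledgeDrain removes the ring-specific shard and drain labels so the
// sharder webhook reassigns the object to a new shard. After the patch, the
// shard must stop reconciling the object.
func acknowledgeDrain(ctx context.Context, c client.Client, obj client.Object, shardLabel, drainLabel string) (drained bool, err error) {
	if _, ok := obj.GetLabels()[drainLabel]; !ok {
		return false, nil // not draining, reconcile as usual
	}

	patch := client.MergeFrom(obj.DeepCopyObject().(client.Object))
	objLabels := obj.GetLabels()
	delete(objLabels, shardLabel)
	delete(objLabels, drainLabel)
	obj.SetLabels(objLabels)

	return true, c.Patch(ctx, obj, patch)
}
```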
## Important Design Decisions
### Don't Watch Sharded Objects
Distributing a controller's reconciliations and cache across multiple instances works very well using the label selector approach.
I.e., if you run 3 shards you can expect each shard to consume about a third of the CPU and memory consumption that a single instance responsible for all objects would.
The key to making Kubernetes controllers horizontally scalable, however, is to ensure that the overhead of the sharding mechanism doesn't grow with the number of objects or the rate of reconciliations.
Otherwise, we would only shift the scalability limitation to another component without removing it.
In other words, sharding Kubernetes controllers obviously comes with an overhead, just as sharding a database does.
However, this overhead needs to be constant or at most grow sublinearly.
In this project's [first iteration](https://github.com/timebertt/thesis-controller-sharding), the sharder didn't use a webhook to assign objects during admission.
Instead, the sharder ran a controller with watches for the sharded objects.
Although the sharder used lightweight metadata-only watches, the overhead still grew with the number of sharded objects.
In the study project's evaluation (see chapter 6 of the paper), it was shown that the setup was already capable of distributing resource consumption across multiple instances but still faced a scalability limitation in the sharder's resource consumption.
In the [second iteration](https://github.com/timebertt/masters-thesis-controller-sharding), the sharder doesn't watch the sharded objects anymore.
The watch events were only needed for labeling unassigned objects immediately.
This is now handled by the sharder webhook instead.
The other cases where object assignments need to be performed (membership changes) are unrelated to the objects themselves.
Hence, the controller only needs to watch a small number of objects related to the number of shards.
With this, the overhead of the sharding mechanism is independent of the number of objects.
In fact, it is negligible, as shown in [Evaluating the Sharding Mechanism](evaluation.md).
The comparisons show that the sharder's resource consumption is almost constant apart from spikes during periodic syncs.
### Minimize Impact on the Critical Path
While the use of mutating webhooks might allow dropping watches for the sharded objects, they can have a significant impact on API requests, e.g., regarding request latency.
To minimize the impact of the sharder's webhook on the overall request latency, the webhook is configured to only react on precisely the set of objects configured in the `ControllerRing` and only for `CREATE` and `UPDATE` requests of unassigned objects.
With this, the webhook is only on the critical path during initial object creation and whenever changes in the set of available shards require reassignments.
Furthermore, webhooks can cause API requests to fail entirely.
To reduce the risk of such failures, the sharder is deployed in a highly available fashion and the webhook is configured with a low timeout and failure policy `Ignore`.
With this, API requests still succeed even if the webhook server is briefly unreachable.
In such cases, the object will be unassigned until the next sync of the sharder controller.
I.e., the design prioritizes availability of the API over consistency of assignments.
Also, the sharding mechanism doesn't touch the critical path of actual reconciliations.
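The webhook configuration the sharder generates roughly takes the following shape (a sketch with hypothetical values; `clientConfig`, `sideEffects`, and other required fields are omitted, and the real values are derived from the `ControllerRing`):

```go
import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
)

// A per-ring webhook: it only matches unassigned objects (shard label absent),
// only reacts on CREATE and UPDATE, fails open, and times out quickly.
var webhook = admissionregistrationv1.MutatingWebhook{
	FailurePolicy:  ptr.To(admissionregistrationv1.Ignore),
	TimeoutSeconds: ptr.To[int32](5),
	ObjectSelector: &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      shardingv1alpha1.LabelShard("my-controllerring"),
			Operator: metav1.LabelSelectorOpDoesNotExist,
		}},
	},
	Rules: []admissionregistrationv1.RuleWithOperations{{
		Operations: []admissionregistrationv1.OperationType{
			admissionregistrationv1.Create,
			admissionregistrationv1.Update,
		},
		Rule: admissionregistrationv1.Rule{
			APIGroups:   []string{""},
			APIVersions: []string{"v1"},
			Resources:   []string{"secrets"},
		},
	}},
}
```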
### Minimize Impact on the Control Plane
By using label selectors on the watch connections of individual shards, the load on the API server is not changed compared to a single controller instance that watches all objects without a selector.
Additionally, the sharder minimizes the extra load on the API server and etcd when it comes to `LIST` requests of all sharded objects (e.g., during periodic syncs).
For this, it only lists the metadata of the sharded objects (spec and status are irrelevant).
Also, it passes the [request parameter](https://kubernetes.io/docs/reference/using-api/api-concepts/#the-resourceversion-parameter) `resourceVersion=0` to the API server, which causes it to serve the request from the in-memory watch cache instead of performing a quorum read on etcd.
In other words, the design accepts slightly outdated data, and thus slightly inconsistent object assignments, in favor of better performance and scalability.
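With controller-runtime, such a metadata-only list from the watch cache can be sketched like this (a hypothetical helper, using Secrets as an example of a sharded resource):

```go
import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listShardedMetadata lists only the metadata of sharded objects (spec and
// status are irrelevant to the sharder) and sets resourceVersion=0 so the
// API server serves the request from its watch cache instead of performing
// a quorum read on etcd.
func listShardedMetadata(ctx context.Context, c client.Client) (*metav1.PartialObjectMetadataList, error) {
	secrets := &metav1.PartialObjectMetadataList{}
	secrets.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("SecretList"))

	err := c.List(ctx, secrets, &client.ListOptions{
		Raw: &metav1.ListOptions{ResourceVersion: "0"},
	})
	return secrets, err
}
```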
## Limitations
### Limited Support for `generateName`
In the first iteration (without the sharder webhook), the object's `uid` was the essential part of the hash key.
With the evolution of the mechanism to assign objects during admission using a mutating webhook, the object's `uid` cannot be used any longer as it is unset during admission for `CREATE` requests.
Hence, the sharder uses the object's `GroupVersionKind`, `namespace`, and `name` for calculating the hash key instead.
This works well and also supports calculating the same hash key for controlled objects by using information from `ownerReferences`.
However, this also means that `generateName` is not supported for resources that are not controlled by other resources in the ring.
The reason is that `generateName` is not set during admission for `CREATE` requests similar to the `uid` field.
Note that `generateName` is still supported for objects that are controlled by other objects, as the controlled object's own name is not included in the hash key.
This tradeoff seems acceptable, as there are not many good use cases for `generateName`.
In general, using `generateName` in controllers makes it difficult to prevent incorrect actions (e.g., creating too many controlled objects) as the controller needs to track its own actions that used `generateName`.
Instead, using deterministic naming based on the owning object (e.g., spec contents or `uid`) simplifies achieving correctness significantly.
All other use cases, where `generateName` simply generates a random name for an object one doesn't really care about (e.g., in integration or load tests), can just as well generate a random suffix on the client side before submitting the request to the API server.
However, if the API server set an object's `uid` or `generateName` before admission for `CREATE` requests, this limitation could be lifted.
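As a sketch, such a deterministic name could be derived from the owning object's name and `uid` (a hypothetical helper, not part of this repository):

```go
import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// childName derives a deterministic, collision-resistant name for a controlled
// object from its owner, avoiding generateName entirely. The UID-based suffix
// stays stable across reconciliations but changes if the owner is recreated.
func childName(owner client.Object) string {
	suffix := sha256.Sum256([]byte(owner.GetUID()))
	return fmt.Sprintf("%s-%s", owner.GetName(), hex.EncodeToString(suffix[:])[:8])
}
```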
================================================
FILE: docs/development.md
================================================
# Development and Testing Setup
This document explains more details of the development and testing setup that is also presented in [Getting Started With Controller Sharding](getting-started.md).
## Development Cluster
The setup's basis is a local [kind](https://kind.sigs.k8s.io/) cluster.
This simplifies developing and testing the project as it comes without additional cost, can be thrown away easily, and one doesn't need to push development images to a remote registry.
In other words, there are no prerequisites for getting started with this project other than a [Go](https://go.dev/) and [Docker](https://www.docker.com/) installation.
```bash
# create a local cluster
make kind-up
# target the kind cluster
export KUBECONFIG=$PWD/hack/kind_kubeconfig.yaml
# delete the local cluster
make kind-down
```
If you want to use another cluster for development (e.g., a remote cluster), simply set the `KUBECONFIG` environment variable as usual and all make commands will target the cluster pointed to by your kubeconfig.
Note that you might need to push images to a remote registry though.
## Components
The development setup reuses the deployment manifests of the main sharding components developed in this repository, located in [`config`](../config).
See [Install the Sharding Components](installation.md).
It also includes the [checksum-controller](../cmd/checksum-controller) as an example sharded controller (see [Implement Sharding in Your Controller](implement-sharding.md)) and the [webhosting-operator](../webhosting-operator/README.md) (see [Evaluating the Sharding Mechanism](evaluation.md)).
Apart from this, the development setup also includes some external components, located in [`hack/config`](../hack/config).
This includes [cert-manager](https://cert-manager.io/), [ingress-nginx](https://kubernetes.github.io/ingress-nginx/), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus), [kyverno](https://kyverno.io/), and [parca](https://parca.dev/).
These components are installed for a seamless development and testing experience but also for this project's [Evaluation](evaluation.md) on a remote cluster in the cloud.
## Deploying, Building, Running Using Skaffold
Use `make deploy` to deploy all components with pre-built images using [skaffold](https://skaffold.dev/).
You can overwrite the used images via make variables, e.g., the `TAG` variable:
```bash
make deploy TAG=latest
```
For development, skaffold can build fresh images based on your local changes using [ko](https://ko.build/), load them into your local cluster, and deploy the configuration:
```bash
make up
```
Alternatively, you can also start a skaffold-based dev loop which can automatically rebuild and redeploy images as soon as source files change:
```bash
make dev
# runs initial build and deploy...
# press any key to trigger a fresh build after changing sources
```
If you're not working with a local kind cluster, you need to set `SKAFFOLD_DEFAULT_REPO` to a registry that you can push the dev images to:
```bash
make up SKAFFOLD_DEFAULT_REPO=ghcr.io/timebertt/dev-images
```
Remove all components from the cluster:
```bash
make down
```
For any skaffold-based make command, you can set `SKAFFOLD_MODULE` to target only a specific part of the [skaffold configuration](../hack/config/skaffold.yaml):
```bash
make dev SKAFFOLD_MODULE=sharder
```
## Running on the Host Machine
Instead of running the sharder in the cluster, you can also run it on your host machine targeting your local kind cluster.
This doesn't deploy all components as before but only cert-manager for injecting the webhook's CA bundle.
Assuming a fresh kind cluster:
```bash
make run
```
Now, create the `ControllerRing` and run a local `checksum-controller`:
```bash
make run-checksum-controller
```
You should see that the shard successfully announced itself to the sharder:
```bash
$ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
NAME                           HOLDER                         AGE   CONTROLLERRING        STATE
checksum-controller-lhrlt6h4   checksum-controller-lhrlt6h4   6s    checksum-controller   ready
$ kubectl get controllerring
NAME                  READY   AVAILABLE   SHARDS   AGE
checksum-controller   True    1           1        13s
```
Running the `checksum-controller` locally gives you the option to test non-graceful termination, i.e., a scenario where the shard fails to renew its lease in time.
Simply press `Ctrl-C` twice:
```bash
make run-checksum-controller
...
^C2023-11-24T15:16:50.948+0100 INFO Shutting down gracefully in 2 seconds, send another SIGINT or SIGTERM to shutdown non-gracefully
^Cexit status 1
```
## Testing the Sharding Setup
Regardless of the setup you use (skaffold-based or running on the host machine), you should be able to create sharded `Secrets` in the `default` namespace, as configured in the example `ControllerRing`.
The `ConfigMaps` created by the `checksum-controller` should be assigned to the same shard as the owning `Secret`:
```bash
$ kubectl create secret generic foo --from-literal foo=bar
secret/foo created
$ kubectl get cm,secret -L shard.alpha.sharding.timebertt.dev/checksum-controller
NAME                      DATA   AGE   CHECKSUM-CONTROLLER
configmap/checksums-foo   1      1s    checksum-controller-lhrlt6h4

NAME         TYPE     DATA   AGE   CHECKSUM-CONTROLLER
secret/foo   Opaque   1      1s    checksum-controller-lhrlt6h4
```
## Monitoring
When using the skaffold-based setup, you also get a full monitoring setup for observing and analyzing the components' resource usage.
To access the monitoring dashboards and metrics in Grafana, simply forward its port and open http://localhost:3000/ in your browser:
```bash
kubectl -n monitoring port-forward svc/grafana 3000 &
```
The password for Grafana's `admin` user is written to `hack/config/monitoring/default/grafana_admin_password.secret.txt`.
Be sure to check out the controller-runtime dashboard: http://localhost:3000/d/PuCBL3zVz/controller-runtime-controllers
## Continuous Profiling
To dig deeper into the components' resource usage, you can deploy the continuous profiling setup based on [Parca](https://parca.dev/):
```bash
make up SKAFFOLD_MODULE=profiling SKAFFOLD_PROFILE=profiling
```
To access the profiling data in Parca, simply forward its port and open http://localhost:7070/ in your browser:
```bash
kubectl -n parca port-forward svc/parca 7070 &
```
For accessing Parca through its `Ingress`, use the basic auth password for the `parca` user from `hack/config/profiling/parca_password.secret.txt`.
Note that the Parca deployment doesn't implement retention for profiling data, i.e., the Parca data volume will grow indefinitely as long as Parca is running.
To shut down Parca and destroy the persistent volume after analyzing the collected profiles, use the following command:
```bash
make down SKAFFOLD_MODULE=profiling SKAFFOLD_PROFILE=profiling
```
================================================
FILE: docs/evaluation.md
================================================
# Evaluating the Sharding Mechanism
This guide describes how the sharding mechanism implemented in this repository is evaluated and outlines the key results of the evaluation performed in the associated [Master's thesis](https://github.com/timebertt/masters-thesis-controller-sharding).
Please refer to the thesis' evaluation section for more details.
## Components
The evaluation setup builds upon the [Development and Testing Setup](development.md) but adds a few more components.
To demonstrate and evaluate the implemented sharding mechanisms using a fully functioning controller, a dedicated example operator was developed: the [webhosting-operator](../webhosting-operator/README.md).
While the webhosting-operator is developed in the same repository, it only serves as an example.
When deploying the sharding components using `make deploy` or `make up`, the webhosting-operator is automatically deployed along with the other evaluation components.
Assuming you're in the repository's root directory, you can deploy the webhosting-operator using:
```bash
# deploy the webhosting-operator using pre-built images
make deploy SKAFFOLD_MODULE=webhosting-operator TAG=latest
# alternatively, build and deploy fresh images
make up SKAFFOLD_MODULE=webhosting-operator
```
To perform a quick test of the webhosting-operator, create some example `Website` objects:
```bash
$ kubectl apply -k webhosting-operator/config/samples
...
$ kubectl -n project-foo get website,deploy,ing,svc,cm -L shard.alpha.sharding.timebertt.dev/webhosting-operator
NAME                                       THEME      PHASE   SINCE   AGE   WEBHOSTING-OPERATOR
website.webhosting.timebertt.dev/kubecon   exciting   Ready   1s      3s    webhosting-operator-5f7854768d-8n59m
website.webhosting.timebertt.dev/library   lame       Ready   1s      3s    webhosting-operator-5f7854768d-j67tj

NAME                             READY   UP-TO-DATE   AVAILABLE   AGE   WEBHOSTING-OPERATOR
deployment.apps/kubecon-b5ed55   1/1     1            1           3s    webhosting-operator-5f7854768d-8n59m
deployment.apps/library-185298   1/1     1            1           3s    webhosting-operator-5f7854768d-j67tj

NAME                                       CLASS   HOSTS                      ADDRESS   PORTS     AGE   WEBHOSTING-OPERATOR
ingress.networking.k8s.io/kubecon-b5ed55   nginx   webhosting.timebertt.dev             80, 443   3s    webhosting-operator-5f7854768d-8n59m
ingress.networking.k8s.io/library-185298   nginx   webhosting.timebertt.dev             80, 443   3s    webhosting-operator-5f7854768d-j67tj

NAME                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE   WEBHOSTING-OPERATOR
service/kubecon-b5ed55   ClusterIP   100.82.167.176   <none>        8080/TCP   3s    webhosting-operator-5f7854768d-8n59m
service/library-185298   ClusterIP   100.82.224.52    <none>        8080/TCP   3s    webhosting-operator-5f7854768d-j67tj

NAME                       DATA   AGE   WEBHOSTING-OPERATOR
configmap/kubecon-b5ed55   2      3s    webhosting-operator-5f7854768d-8n59m
configmap/library-185298   2      3s    webhosting-operator-5f7854768d-j67tj
```
You can now visit the created websites at http://localhost:8088/project-foo/kubecon and http://localhost:8088/project-foo/library.
You can also visit your [local webhosting dashboard](http://127.0.0.1:3000/d/NbmNpqEnk/webhosting?orgId=1) after forwarding the Grafana port:
```bash
kubectl -n monitoring port-forward svc/grafana 3000
```
This dashboard uses metrics exported by [webhosting-operator](../webhosting-operator/pkg/metrics) about its API objects, i.e., `kube_website_*` and `kube_theme_*`.
There is also a dashboard about the [sharding of websites](http://127.0.0.1:3000/d/7liIybkVk/sharding?orgId=1).
In addition to creating the preconfigured websites, you can also generate some more random websites using the [samples-generator](../webhosting-operator/cmd/samples-generator):
```bash
# create a random number of websites per project namespace (up to 50 each)
$ go run ./webhosting-operator/cmd/samples-generator
created 32 Websites in project "project-foo"
```
## Load Tests
The [experiment](../webhosting-operator/cmd/experiment) tool allows executing different scenarios for load testing the webhosting-operator, which are used for evaluating the sharding mechanism:
```text
$ go run ./webhosting-operator/cmd/experiment -h
Usage:
  experiment [command]

Available Scenarios
  basic            Basic load test, create 9k websites in 15 minutes
  chaos            Create 4.5k websites over 15 minutes and terminate a random shard every 5 minutes
  rolling-update   Create 9k websites in 15 minutes while rolling the operator
  scale-out        Measure scale-out properties with a high churn rate
...
```
A load test scenario can be executed using one of these commands:
```bash
# run the basic scenario from your development machine (not recommended)
go run ./webhosting-operator/cmd/experiment basic
# build the experiment image and run the basic scenario as a Job on the cluster
make up SKAFFOLD_MODULE=experiment EXPERIMENT_SCENARIO=basic
# use a pre-built experiment image to run the basic scenario as a Job on the cluster
make deploy SKAFFOLD_MODULE=experiment EXPERIMENT_SCENARIO=basic TAG=latest
```
All scenarios put load on webhosting-operator by creating and mutating a large amount of `Website` objects.
However, actually running so many `Websites` would waste immense compute power just to serve thousands of dummy websites.
Hence, for `Websites` created in load tests, the webhosting-operator creates the corresponding `Deployments` with `spec.replicas=0`.
It also doesn't expose `Websites` created in load tests via `Ingress` objects by setting `spec.ingressClassName=fake`.
Otherwise, this would overload the ingress controller, which is not what the experiment is actually supposed to load test.
When running load test experiments on the cluster, a `ServiceMonitor` is created to instruct Prometheus to scrape the `experiment` tool.
As the tool is based on controller-runtime as well, the controller-runtime metrics can be used for visualizing the load test scenario and verifying that the tool is able to generate the desired load.
## Experiment Setup
As a local kind cluster cannot handle such high load, a remote cluster is used to perform the load test experiments.
For this, a [Gardener](https://github.com/gardener/gardener) installation on [STACKIT](https://www.stackit.de/en/) is used to create a cluster based on the [sample manifest](../hack/config/shoot.yaml).
[external-dns](https://github.com/kubernetes-sigs/external-dns) is used for publicly exposing the monitoring and continuous profiling endpoints, as well as `Websites` created outside of load test experiments.
```bash
# gardenctl target --garden ...
kubectl apply -f hack/config/shoot.yaml
# gardenctl target --shoot ...
kubectl apply --server-side -k hack/config/external-dns
kubectl -n external-dns create secret generic google-clouddns-timebertt-dev --from-literal project=$PROJECT_NAME --from-file service-account.json=$SERVICE_ACCOUNT_FILE
# gardenctl target --control-plane
kubectl apply --server-side -k hack/config/policy/controlplane
```
In addition to the described components, [kyverno](https://github.com/kyverno/kyverno) is deployed to the cluster itself (shoot cluster) and to the control plane (seed cluster).
In the cluster itself, kyverno policies are used for scheduling the sharder and webhosting-operator to the dedicated `sharding` worker pool and the experiment `Job` to the dedicated `experiment` worker pool.
This makes sure that these components run on machines isolated from other system components and don't contend for compute resources during load tests.
Furthermore, kyverno policies are added to the control plane to ensure a static size of etcd, kube-apiserver, and kube-controller-manager: requests are set equal to limits for guaranteed resources, vertical autoscaling is disabled, and kube-apiserver is pinned to 4 replicas with horizontal autoscaling disabled.
Also, kube-controller-manager's client-side rate limiting is disabled (ref https://github.com/timebertt/kubernetes-controller-sharding/pull/610, [SIG api-machinery recommendation](https://kubernetes.slack.com/archives/C0EG7JC6T/p1680889646346859?thread_ts=1680791299.631439&cid=C0EG7JC6T)) and HTTP/2 is disabled so that API requests are distributed across API server instances (ref https://github.com/gardener/gardener/issues/8810).
This is done to make load test experiments more stable and their results more reproducible.
## Measurements
After executing a load test experiment, the [measure](../webhosting-operator/cmd/measure) tool is used for retrieving the key metrics from Prometheus.
It takes a configurable set of measurements in the form of Prometheus queries and stores them in CSV-formatted files for further analysis (with numpy/pandas) and visualization (with matplotlib).
Please see the [results directory](https://github.com/timebertt/masters-thesis-controller-sharding/tree/main/results) in the Master's thesis' repository for the exact measurements taken.
The scale of the controller setup is measured in two dimensions:
1. The number of API objects that the controller watches and reconciles.
2. The churn rate of API objects, i.e., the rate of object creations, updates, and deletions.
```yaml
queries:
- name: website-count # dimension 1
  query: |
    sum(kube_website_info)
- name: website-churn # dimension 2
  query: |
    sum(rate(
      controller_runtime_reconcile_total{
        job="experiment", result!="error",
        controller=~"website-(generator|deleter|mutator)"
      }[1m]
    )) by (controller)
```
## SLIs / SLOs
To consider a controller setup as performing adequately, the following SLOs need to be satisfied:
1. The time of enqueuing object keys for reconciliation for every controller, measured as the 99th percentile per cluster-day, is at maximum 1 second.
2. The latency of realizing the desired state of objects for every controller, excluding reconciliation time of controlled objects, until observed by a watch request, measured as the 99th percentile per cluster-day, is at maximum x, where x depends on the controller.
In case of the `Website` controller, x = 5 seconds is chosen.
```yaml
queries:
- name: latency-queue # SLO 1
  type: instant
  slo: 1
  query: |
    histogram_quantile(0.99, sum by (le) (rate(
      workqueue_queue_duration_seconds_bucket{
        job="webhosting-operator", name="website"
      }[$__range]
    )))
- name: latency-reconciliation # SLO 2
  type: instant
  slo: 5
  query: |
    histogram_quantile(0.99, sum by (le) (rate(
      experiment_website_reconciliation_duration_seconds_bucket{
        job="experiment"
      }[$__range]
    )))
```
## Comparison
The following graphs show the generated load and compare the resulting CPU, memory, and network usage of the components in three different setups when running the `basic` experiment scenario (~9k websites created over 15m):
- external sharder: 3 webhosting-operator pods (shards) + 2 sharder pods (the new approach implemented in this repository, second iteration for the Master's thesis)
- internal sharder: 3 webhosting-operator pods (3 shards, 1 acts as the sharder) (the old approach, first iteration for the study project)
- singleton: 1 webhosting-operator pod (traditional leader election setup without sharding)
*(Figures omitted: generated load and the resulting CPU, memory, and network usage of the three setups.)*
The new external sharding approach proves to scale best.
The individual shards consume about a third of the singleton controller's usage (close to optimum).
Also, the sharder pods consume a low static amount of resources.
Most importantly, the sharder's resource usage is independent of the number of sharded objects.
## Horizontal Scalability
To evaluate the horizontal scalability of the sharding mechanism (external sharder), the maximum load capacity is determined for different numbers of instances (1, 2, 3, 4, 5).
While the load increases, cumulative SLIs from the start of the experiment are calculated.
When the cumulative SLI exceeds the SLO, the object count and churn rate at that point are taken as the maximum load capacity.
As shown in the last plot, the system's capacity increases almost linearly with the number of added instances.
*(Figures omitted: cumulative SLIs and the maximum load capacity per number of instances.)*
================================================
FILE: docs/getting-started.md
================================================
# Getting Started With Controller Sharding
This guide walks you through getting started with controller sharding in a local cluster.
It sets up the sharder and an example sharded controller so that you can see the components in action.
This is great for trying out the project for the first time and learning about the basic concepts.
## Setup
Create a local cluster using [kind](https://kind.sigs.k8s.io/) and deploy all components:
```bash
make kind-up
export KUBECONFIG=$PWD/hack/kind_kubeconfig.yaml
make deploy TAG=latest
```
The sharder is running in the `sharding-system` namespace and the example shard (checksum-controller) is deployed to the `default` namespace:
```bash
$ kubectl -n sharding-system get po
NAME                      READY   STATUS    RESTARTS   AGE
sharder-99fcf97b4-hpm6w   1/1     Running   0          17s
sharder-99fcf97b4-zr7rj   1/1     Running   0          17s
$ kubectl get po
NAME                                  READY   STATUS    RESTARTS   AGE
checksum-controller-c95c4fdb6-7jb2v   1/1     Running   0          18s
checksum-controller-c95c4fdb6-hv8pb   1/1     Running   0          18s
checksum-controller-c95c4fdb6-rtvrm   1/1     Running   0          18s
```
## The `ControllerRing` and `Lease` Objects
We can see that the `ControllerRing` object is ready and reports 3 available shards out of 3 total shards:
```bash
$ kubectl get controllerring checksum-controller
NAME                  READY   AVAILABLE   SHARDS   AGE
checksum-controller   True    3           3        25s
```
All shards announce themselves to the sharder by maintaining an individual `Lease` object with the `alpha.sharding.timebertt.dev/controllerring` label.
We can observe that the sharder recognizes all shards as available by looking at the `alpha.sharding.timebertt.dev/state` label:
```bash
$ kubectl get lease -L alpha.sharding.timebertt.dev/controllerring,alpha.sharding.timebertt.dev/state
NAME                                  HOLDER                                AGE   CONTROLLERRING        STATE
checksum-controller-c95c4fdb6-7jb2v   checksum-controller-c95c4fdb6-7jb2v   44s   checksum-controller   ready
checksum-controller-c95c4fdb6-hv8pb   checksum-controller-c95c4fdb6-hv8pb   44s   checksum-controller   ready
checksum-controller-c95c4fdb6-rtvrm   checksum-controller-c95c4fdb6-rtvrm   44s   checksum-controller   ready
```
The `ControllerRing` object specifies which API resources should be sharded.
Optionally, it allows selecting the namespaces in which API resources are sharded:
```yaml
apiVersion: sharding.timebertt.dev/v1alpha1
kind: ControllerRing
metadata:
  name: checksum-controller
spec:
  resources:
  - group: ""
    resource: secrets
    controlledResources:
    - group: ""
      resource: configmaps
  namespaceSelector:
    matchLabels:
      kubernetes.io/metadata.name: default
```
In our case, the `checksum-controller` reconciles `Secrets` in the `default` namespace and creates a `ConfigMap` including the secret data's checksums.
The created `ConfigMaps` are controlled by the respective `Secret`, i.e., they have an `ownerReference` with `controller=true` pointing to the `Secret`.
## The Sharder Webhook
The sharder created a `MutatingWebhookConfiguration` for the resources listed in our `ControllerRing` specification:
```bash
$ kubectl get mutatingwebhookconfiguration -l alpha.sharding.timebertt.dev/controllerring=checksum-controller
NAME                                 WEBHOOKS   AGE
controllerring-checksum-controller   1          71s
```
Let's examine the webhook configuration for more details.
We can see that the webhook targets a ring-specific path served by the `sharder`.
It reacts to `CREATE` and `UPDATE` requests for the configured resources where the object doesn't have the ring-specific shard label.
I.e., it only gets called for unassigned objects and adds the shard assignment label during admission.
```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  name: controllerring-checksum-controller
webhooks:
- clientConfig:
    service:
      name: sharder
      namespace: sharding-system
      path: /webhooks/sharder/controllerring/checksum-controller
      port: 443
  name: sharder.sharding.timebertt.dev
  namespaceSelector:
    matchLabels:
      kubernetes.io/metadata.name: default
  objectSelector:
    matchExpressions:
    - key: shard.alpha.sharding.timebertt.dev/checksum-controller
      operator: DoesNotExist
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - '*'
    operations:
    - CREATE
    - UPDATE
    resources:
    - secrets
    scope: '*'
  - apiGroups:
    - ""
    apiVersions:
    - '*'
    operations:
    - CREATE
    - UPDATE
    resources:
    - configmaps
    scope: '*'
```
## Creating Sharded Objects
We can observe the behavior of the webhook by creating a first example object.
When we create a `Secret`, the webhook assigns it to one of the available controller instances by adding the ring-specific shard label.
It uses a consistent hashing algorithm: both the object's key (consisting of API group, `kind`, `namespace`, and `name`) and the shards' names are hashed onto a virtual ring.
The object is assigned to the shard whose hash value follows the object's hash on the ring in clockwise direction.
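The following is a minimal sketch of this assignment logic (illustrative key format and shard names; real implementations, including the sharder, typically also place multiple virtual nodes per shard on the ring for a more even distribution):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/cespare/xxhash/v2"
)

type ringPoint struct {
	hash  uint64
	shard string
}

// assign hashes the shard names and the object's key onto a virtual ring and
// picks the shard whose hash follows the object's hash clockwise.
func assign(objectKey string, shards []string) string {
	ring := make([]ringPoint, 0, len(shards))
	for _, s := range shards {
		ring = append(ring, ringPoint{hash: xxhash.Sum64String(s), shard: s})
	}
	sort.Slice(ring, func(i, j int) bool { return ring[i].hash < ring[j].hash })

	h := xxhash.Sum64String(objectKey)
	i := sort.Search(len(ring), func(i int) bool { return ring[i].hash >= h })
	if i == len(ring) {
		i = 0 // wrap around the ring
	}
	return ring[i].shard
}

func main() {
	shards := []string{"shard-a", "shard-b", "shard-c"}
	fmt.Println(assign("/v1/Secret/default/foo", shards))
}
```

Because only the shards' positions on the ring change when instances come and go, most objects keep their assignment, which keeps rebalancing cheap.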
```bash
$ kubectl create secret generic foo --from-literal foo=bar -oyaml
apiVersion: v1
data:
  foo: YmFy
kind: Secret
metadata:
  labels:
    shard.alpha.sharding.timebertt.dev/checksum-controller: checksum-controller-c95c4fdb6-hv8pb
  name: foo
  namespace: default
type: Opaque
```
We can see that the responsible shard reconciled the `Secret` and created a `ConfigMap` for it.
Similar to the `Secret`, the `ConfigMap` was also assigned by the webhook.
In this case however, the sharder uses the information about the owning `Secret` for calculating the object's hash key.
With this, owned objects are always assigned to the same shard as their owner.
This is done because the controller typically needs to reconcile the owning object whenever the status of an owned object changes.
E.g., the `Deployment` controller watches `ReplicaSets` and continues rolling updates of the owning `Deployment` as soon as the owned `ReplicaSet` has the desired number of replicas.
```bash
$ kubectl get configmap checksums-foo -oyaml
apiVersion: v1
data:
  foo: fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9
kind: ConfigMap
metadata:
  labels:
    shard.alpha.sharding.timebertt.dev/checksum-controller: checksum-controller-c95c4fdb6-hv8pb
  name: checksums-foo
  namespace: default
  ownerReferences:
  - apiVersion: v1
    controller: true
    kind: Secret
    name: foo
```
Let's create a few more `Secrets` and observe the distribution of objects across shards:
```bash
$ for i in $(seq 1 9); do kubectl create secret generic foo$i ; done
$ kubectl get secret,configmap -L shard.alpha.sharding.timebertt.dev/checksum-controller
NAME          TYPE     DATA   AGE   CHECKSUM-CONTROLLER
secret/foo    Opaque   1      39s   checksum-controller-c95c4fdb6-hv8pb
secret/foo1   Opaque   0      10s   checksum-controller-c95c4fdb6-rtvrm
secret/foo2   Opaque   0      10s   checksum-controller-c95c4fdb6-7jb2v
secret/foo3   Opaque   0      10s   checksum-controller-c95c4fdb6-rtvrm
secret/foo4   Opaque   0      10s   checksum-controller-c95c4fdb6-hv8pb
secret/foo5   Opaque   0      10s   checksum-controller-c95c4fdb6-rtvrm
secret/foo6   Opaque   0      10s   checksum-controller-c95c4fdb6-hv8pb
secret/foo7   Opaque   0      10s   checksum-controller-c95c4fdb6-7jb2v
secret/foo8   Opaque   0      10s   checksum-controller-c95c4fdb6-rtvrm
secret/foo9   Opaque   0      10s   checksum-controller-c95c4fdb6-hv8pb

NAME                       DATA   AGE   CHECKSUM-CONTROLLER
configmap/checksums-foo    1      39s   checksum-controller-c95c4fdb6-hv8pb
configmap/checksums-foo1   0      10s   checksum-controller-c95c4fdb6-rtvrm
configmap/checksums-foo2   0      10s   checksum-controller-c95c4fdb6-7jb2v
configmap/checksums-foo3   0      10s   checksum-controller-c95c4fdb6-rtvrm
configmap/checksums-foo4   0      10s   checksum-controller-c95c4fdb6-hv8pb
configmap/checksums-foo5   0      10s   checksum-controller-c95c4fdb6-rtvrm
configmap/checksums-foo6   0      10s   checksum-controller-c95c4fdb6-hv8pb
configmap/checksums-foo7   0      10s   checksum-controller-c95c4fdb6-7jb2v
configmap/checksums-foo8   0      10s   checksum-controller-c95c4fdb6-rtvrm
configmap/checksums-foo9   0      10s   checksum-controller-c95c4fdb6-hv8pb
```
## Removing Shards From the Ring
Let's see what happens when the set of available shards changes.
We can observe the actions that the sharder takes using `kubectl get secret --show-labels -w --output-watch-events --watch-only` in a new terminal session.
First, let's scale down the sharded controller to remove one shard from the ring:
```bash
$ kubectl scale deployment checksum-controller --replicas 2
deployment.apps/checksum-controller scaled
```
The shard releases its `Lease` by setting the `holderIdentity` field to the empty string.
The sharder recognizes that the shard was removed from the ring and sets its state to `dead`.
With this, the shard is no longer considered for object assignments.
The orphaned `Lease` is cleaned up after 1 minute.
```bash
$ kubectl get lease -L alpha.sharding.timebertt.dev/state
NAME                                  HOLDER                                AGE     STATE
checksum-controller-c95c4fdb6-7jb2v   checksum-controller-c95c4fdb6-7jb2v   3m34s   ready
checksum-controller-c95c4fdb6-hv8pb   checksum-controller-c95c4fdb6-hv8pb   3m34s   ready
checksum-controller-c95c4fdb6-rtvrm                                         3m34s   dead
```
We can observe that the sharder immediately moved objects that were assigned to the removed shard to the remaining available shards.
For this, the sharder controller simply removes the shard label on all affected objects and lets the webhook reassign them.
As the original shard is not available anymore, the movement doesn't need to be coordinated with it, so the sharder can reassign the objects immediately.
```bash
$ kubectl get secret --show-labels -w --output-watch-events --watch-only
EVENT      NAME   TYPE     DATA   AGE   LABELS
MODIFIED   foo1   Opaque   0      48s   shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-hv8pb
MODIFIED   foo3   Opaque   0      48s   shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-7jb2v
MODIFIED   foo5   Opaque   0      48s   shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-hv8pb
MODIFIED   foo8   Opaque   0      48s   shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-7jb2v
```
## Adding Shards to the Ring
Now, let's scale up our sharded controller to add a new shard to the ring.
```bash
$ kubectl scale deployment checksum-controller --replicas 3
deployment.apps/checksum-controller scaled
```
We can observe that the new `Lease` object is in state `ready`.
With this, the new shard is immediately considered for assignment of new objects.
```bash
$ kubectl get lease -L alpha.sharding.timebertt.dev/state
NAME                                  HOLDER                                AGE     STATE
checksum-controller-c95c4fdb6-7jb2v   checksum-controller-c95c4fdb6-7jb2v   4m19s   ready
checksum-controller-c95c4fdb6-hv8pb   checksum-controller-c95c4fdb6-hv8pb   4m19s   ready
checksum-controller-c95c4fdb6-kdrss   checksum-controller-c95c4fdb6-kdrss   4s      ready
```
In this case, a rebalancing needs to happen and the sharder needs to move objects away from available shards to the new shard.
In contrast to moving objects from unavailable shards, this needs to be coordinated to prevent multiple shards from acting on the same object concurrently.
Otherwise, the shards might perform conflicting actions which might lead to a broken state of the objects.
For this, the sharder adds the drain label to all objects that should be moved to the new shard.
This asks the currently responsible shard to stop reconciling the object and acknowledge the movement.
As soon as the controller observes the drain label, it removes it again along with the shard label.
This triggers the sharder webhook which immediately assigns the object to the desired shard.
```bash
$ kubectl get secret --show-labels -w --output-watch-events --watch-only
EVENT      NAME   TYPE     DATA   AGE    LABELS
MODIFIED   foo5   Opaque   0      116s   drain.alpha.sharding.timebertt.dev/checksum-controller=true,shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-hv8pb
MODIFIED   foo8   Opaque   0      116s   drain.alpha.sharding.timebertt.dev/checksum-controller=true,shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-7jb2v
MODIFIED   foo5   Opaque   0      116s   shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-kdrss
MODIFIED   foo8   Opaque   0      116s   shard.alpha.sharding.timebertt.dev/checksum-controller=checksum-controller-c95c4fdb6-kdrss
```
## Clean Up
Simply delete the local cluster to clean up:
```bash
make kind-down
```
## Where To Go From Here?
Now, you should have a basic understanding of how sharding for Kubernetes controllers works.
If you want to learn more about the individual components, the sharding architecture, and the reasoning behind it, see [Design](design.md).
You might also be interested in reading the [Evaluation](evaluation.md) document about load tests for sharded controllers and how this project helps in scaling Kubernetes controllers.
If you want to use sharding for your own controllers, see [Implement Sharding in Your Controller](implement-sharding.md).
To further experiment with the setup from this guide or to start developing changes to the sharding components, see [Development and Testing Setup](development.md).
You can also take a look at the remaining docs in the [documentation index](README.md).
================================================
FILE: docs/implement-sharding.md
================================================
# Implement Sharding in Your Controller
This guide walks you through implementing sharding for your own controller.
A prerequisite for using a sharded controller setup is to install the sharding components in the cluster; see [Install the Sharding Components](installation.md).
## Configuring the `ControllerRing`
After installing the sharding components, you can go ahead and configure a `ControllerRing` object for your controller.
For all controllers that you want to shard, configure the controller's main resource and the controlled resources in `ControllerRing.spec.resources`.
As an example, let's consider a subset of kube-controller-manager's controllers: `Deployment` and `ReplicaSet`.
- The `Deployment` controller reconciles the `deployments` resource and controls `replicasets`.
- The `ReplicaSet` controller reconciles the `replicasets` resource and controls `pods`.
The corresponding `ControllerRing` for the `Deployment` controller would need to be configured like this:
```yaml
apiVersion: sharding.timebertt.dev/v1alpha1
kind: ControllerRing
metadata:
  name: kube-controller-manager-deployment
spec:
  resources:
  - group: apps
    resource: deployments
    controlledResources:
    - group: apps
      resource: replicasets
```
Note that the `ControllerRing` name must not be longer than 63 characters because it is used as part of the shard and drain label key (see below).
To allow the sharder to reassign the sharded objects during rebalancing, we need to grant the corresponding permissions.
We need to grant these permissions explicitly depending on what is configured in the `ControllerRing`.
Otherwise, the sharder would basically require `cluster-admin` access.
For the above example, we would use these RBAC manifests:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: sharding:controllerring:kube-controller-manager
rules:
- apiGroups:
  - apps
  resources:
  - deployments
  - replicasets
  verbs:
  - list
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: sharding:controllerring:kube-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: sharding:controllerring:kube-controller-manager
subjects:
- kind: ServiceAccount
  name: sharder
  namespace: sharding-system
```
## Implementation Changes
To support sharding in your Kubernetes controller, only three aspects need to be implemented:
- announce ring membership and shard health: maintain individual shard `Leases` instead of performing leader election on a single `Lease`
- only watch, cache, and reconcile objects assigned to the respective shard: add a shard-specific label selector to watches
- acknowledge object movements during rebalancing: remove the drain and shard label when the drain label is set and stop reconciling the object
[`pkg/shard`](../pkg/shard) contains reusable reference implementations for these aspects.
[`cmd/checksum-controller`](../cmd/checksum-controller) serves as an example implementation for sharded controllers that shows how to put the pieces together in controllers based on [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime).
However, sharding can also be implemented in controllers that don't use controller-runtime or that are written in another programming language than Go.
The following sections outline the exact requirements that a sharded controller needs to fulfill and then show how to implement them in controllers based on controller-runtime.
Don't be scared by the long descriptions.
Implementing these aspects is simple (especially if reusing the helpers designed for controller-runtime controllers) and only needs to be done once.
The long descriptions just make sure the requirements are perfectly clear if you need to implement them yourself.
### Shard Lease
In short: ensure your shard maintains a `Lease` object like this and only runs its controllers as long as it holds the `Lease`:
```yaml
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  labels:
    alpha.sharding.timebertt.dev/controllerring: my-controllerring
  name: my-operator-565df55f4b-5vwpj
  namespace: operator-system
spec:
  holderIdentity: my-operator-565df55f4b-5vwpj # needs to equal the Lease's name
  leaseDurationSeconds: 15 # pick whatever you would use for leader election as well
```
Most controllers already perform leader election using a central `Lease` lock object.
Only if the instance is elected as the leader, it is allowed to run the controllers.
If it fails to renew the `Lease` in time, another instance is allowed to acquire the `Lease` and can run the controllers.
Hence, an instance must not run any controllers when it loses its `Lease`.
In fact, most implementations exit the entire process when failing to renew the lock for safety.
On graceful termination (e.g., during a rolling update), the active leader may release the lock by setting the `holderIdentity` field of the `Lease` to the empty string.
This allows another instance to acquire the `Lease` immediately without waiting for it to expire, which helps in quick leadership handovers.
The same mechanisms apply to sharded controllers.
But instead of using a central `Lease` object for all instances, each instance acquires and maintains its own `Lease` object to announce itself to the sharder.
A shard may only run its controllers as long as it holds its shard `Lease`.
I.e., when it fails to renew the shard `Lease` in time, it also needs to stop all controllers.
Similar to usual leader election, a shard may release its own shard `Lease` on graceful termination by removing the `holderIdentity`.
This immediately triggers reassignments by the sharder to minimize the duration where no shard is acting on a subset of objects.
In essence, all the existing machinery for leader election can be reused for maintaining the shard `Lease` – that is, with two minor changes.
First, the shard `Lease` needs to be labelled with `alpha.sharding.timebertt.dev/controllerring=<controllerring-name>` to specify which `ControllerRing` the shard belongs to.
Second, the name of the shard `Lease` needs to match the `holderIdentity`.
By default, the instance's hostname is used for both values.
If the `holderIdentity` differs from the name, the sharder assumes that the shard is unavailable.
In controller-runtime, you can configure your shard to maintain its shard `Lease` as follows:
```go
package main

import (
	shardlease "github.com/timebertt/kubernetes-controller-sharding/pkg/shard/lease"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func run() error {
	restConfig := config.GetConfigOrDie()

	shardLease, err := shardlease.NewResourceLock(restConfig, shardlease.Options{
		ControllerRingName: "my-controllerring",
	})
	if err != nil {
		return err
	}

	mgr, err := manager.New(restConfig, manager.Options{
		// SHARD LEASE
		// Use manager's leader election mechanism for maintaining the shard lease.
		// With this, controllers will only run as long as manager holds the shard lease.
		// After graceful termination, the shard lease will be released.
		LeaderElection:                      true,
		LeaderElectionResourceLockInterface: shardLease,
		LeaderElectionReleaseOnCancel:       true,

		// other options ...
	})
	if err != nil {
		return err
	}

	// add controllers and start manager as usual ...
	return nil
}
```
Note that if you're using controller-runtime, the same manager instance cannot run sharded and non-sharded controllers as a manager can only run under a single resource lock (either leader election or shard lease).
### Filtered Watch Cache
In short: use the following label selector on watches for all sharded resources listed in the `ControllerRing`.
```text
shard.alpha.sharding.timebertt.dev/my-controllerring: my-operator-565df55f4b-5vwpj
```
The sharder assigns all sharded objects by adding a shard label that is specific to the `ControllerRing` (resources could be part of multiple `ControllerRings`).
The shard label's key consists of the `shard.alpha.sharding.timebertt.dev/` prefix followed by the `ControllerRing` name.
As the key part after the `/` must not exceed 63 characters, the `ControllerRing` name must not be longer than 63 characters.
The shard label's value is the name of the shard, i.e., the name of the shard lease and the shard lease's `holderIdentity`.
Once you have determined the shard label key for your `ControllerRing`, use it as a selector on all watches that your controller starts for any of the sharded resources.
With this, the shard will only cache the objects assigned to it and the controllers will only reconcile these objects.
Note that when you use a label or field selector on a watch connection and the label or field changes so that the selector doesn't match anymore, the API server will emit a `DELETE` watch event.
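As mentioned above, sharding can also be implemented without controller-runtime. For such controllers, the same selector can be applied with plain client-go informers, e.g. (a sketch; the factory function name and the ring name `my-controllerring` are illustrative):

```go
package shard

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// NewShardInformerFactory returns an informer factory whose informers only
// list and watch objects that are currently assigned to the given shard.
func NewShardInformerFactory(clientset kubernetes.Interface, shardName string) informers.SharedInformerFactory {
	return informers.NewSharedInformerFactoryWithOptions(clientset, 30*time.Minute,
		informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "shard.alpha.sharding.timebertt.dev/my-controllerring=" + shardName
		}),
	)
}
```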
In controller-runtime, you can configure your shard to only watch and reconcile assigned objects as follows.
This snippet works with controller-runtime v0.16 and v0.17; other versions might require a slightly different configuration.
```go
package main

import (
	shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func run() error {
	// ...

	mgr, err := manager.New(restConfig, manager.Options{
		// FILTERED WATCH CACHE
		Cache: cache.Options{
			// Configure cache to only watch objects that are assigned to this shard.
			// This controller only watches sharded objects, so we can configure the label selector
			// on the cache's global level.
			// If your controller watches sharded objects as well as non-sharded objects, use
			// cache.Options.ByObject to configure the label selector on object level.
			DefaultLabelSelector: labels.SelectorFromSet(labels.Set{
				shardingv1alpha1.LabelShard("my-controllerring"): shardLease.Identity(),
			}),
		},

		// other options ...
	})
	// ...
}
```
### Acknowledge Drain Operations
In short: ensure your sharded controller acknowledges drain operations.
When the sharder adds a drain label like the following, the controller needs to remove both the shard and the drain label and stop reconciling the object.
```text
drain.alpha.sharding.timebertt.dev/my-controllerring
```
When the sharder needs to move an object from an available shard to another shard for rebalancing, it first adds the drain label to instruct the currently responsible shard to stop reconciling the object.
The shard needs to acknowledge this operation, as the sharder must prevent concurrent reconciliations of the same object in multiple shards.
The drain label's key is specific to the `ControllerRing` and follows the same pattern as the shard label (see above).
The drain label's value is irrelevant, only the presence of the label is relevant.
Apart from changing the controller's business logic to first check the drain label, also ensure that the watch event filtering logic (predicates) always reacts to events with the drain label set, independent of the controller's actual predicates.
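To make the required logic concrete, here is a minimal sketch of the acknowledgement itself (a hypothetical helper based on the controller-runtime client, not the actual `shardcontroller` implementation; the label keys follow the example ring name from above):

```go
package controller

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// acknowledgeDrain sketches the drain handling a sharded reconciler must
// perform before running its business logic: if the drain label is set,
// remove both the drain and the shard label and skip this object.
func acknowledgeDrain(ctx context.Context, c client.Client, obj client.Object) (drained bool, err error) {
	const (
		drainLabel = "drain.alpha.sharding.timebertt.dev/my-controllerring"
		shardLabel = "shard.alpha.sharding.timebertt.dev/my-controllerring"
	)

	if _, ok := obj.GetLabels()[drainLabel]; !ok {
		return false, nil // not being drained: reconcile as usual
	}

	// Removing both labels acknowledges the drain; the now-unlabeled object
	// triggers the sharder webhook, which reassigns it to the desired shard.
	patch := client.MergeFrom(obj.DeepCopyObject().(client.Object))
	labels := obj.GetLabels()
	delete(labels, drainLabel)
	delete(labels, shardLabel)
	obj.SetLabels(labels)
	return true, c.Patch(ctx, obj, patch)
}
```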
In controller-runtime, you can reuse the helpers for constructing correct predicates and a wrapping reconciler that correctly implements the drain operation as follows:
```go
package controller

import (
	corev1 "k8s.io/api/core/v1"

	shardcontroller "github.com/timebertt/kubernetes-controller-sharding/pkg/shard/controller"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// AddToManager adds a controller to the manager.
// shardName must match the shard lease's name/identity.
func (r *Reconciler) AddToManager(mgr manager.Manager, controllerRingName, shardName string) error {
	// ACKNOWLEDGE DRAIN OPERATIONS
	// Use the shardcontroller package as helpers for:
	// - a predicate that triggers when the drain label is present (even if the actual predicates don't trigger)
	// - wrapping the actual reconciler in a reconciler that handles the drain operation for us
	return builder.ControllerManagedBy(mgr).
		For(&corev1.Secret{}, builder.WithPredicates(shardcontroller.Predicate(controllerRingName, shardName, MySecretPredicate()))).
		Owns(&corev1.ConfigMap{}, builder.WithPredicates(MyConfigMapPredicate())).
		Complete(
			shardcontroller.NewShardedReconciler(mgr).
				For(&corev1.Secret{}). // must match the kind in For() above
				InControllerRing(controllerRingName).
				WithShardName(shardName).
				MustBuild(r),
		)
}
```
================================================
FILE: docs/installation.md
================================================
# Install the Sharding Components
This guide walks you through installing the sharding components from this repository in your cluster.
This procedure is independent of the controller that you want to use sharding for.
## Main Components (required)
For now, only [kustomize](https://kustomize.io/) is supported as the deployment tool.
All deployment manifests for this repository's components are located in [`config`](../config) and can be used from there.
The `config/default` variant holds the default configuration for the sharding components.
It requires [cert-manager](https://cert-manager.io/) to be installed in the cluster for managing the webhook certificates.
Apply it using `kubectl`:
```bash
kubectl apply --server-side -k "https://github.com/timebertt/kubernetes-controller-sharding//config/default?ref=main"
```
You can customize the configuration using the usual kustomize mechanisms:
```bash
cat >kustomization.yaml <<EOF
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- https://github.com/timebertt/kubernetes-controller-sharding//config/default?ref=main
images:
- name: ghcr.io/timebertt/kubernetes-controller-sharding/sharder
  newTag: latest
replicas:
- name: sharder
  count: 3
EOF
kubectl apply --server-side -k .
```
If you can't apply the default configuration, e.g., because you don't want to use cert-manager for managing the webhook certificates, you can also apply the dedicated configurations individually.
First apply the CRD and `sharding-system` namespace using the `config/crds` configuration, then apply the `sharder` itself using the `config/sharder` configuration.
Be sure to mount your webhook server cert to `/tmp/k8s-webhook-server/serving-certs/tls.{crt,key}`.
## Monitoring (optional)
`config/monitoring` contains a `ServiceMonitor` for configuring metrics scraping for the sharder using the [prometheus-operator](https://prometheus-operator.dev/).
See [Monitoring the Sharding Components](monitoring.md) for more information on the exposed metrics.
================================================
FILE: docs/monitoring.md
================================================
# Monitoring the Sharding Components
This document explains the metrics exposed by the sharder for monitoring the sharding components.
The `sharder` service exposes metrics via HTTPS on port `8080` at the `/metrics` endpoint.
Clients need to authenticate against the endpoint and must be authorized for `get` on the `nonResourceURL` `/metrics`.
Refer to the example [`ServiceMonitor` manifest](../config/monitoring/servicemonitor.yaml) for more details.
Also see [Install the Sharding Components](installation.md#monitoring-optional).
Note that all `sharder` instances export the same state metrics for high availability.
Accordingly, you should use suitable aggregation functions for deduplicating time series, e.g.:
```
max without (instance, pod) (controller_sharding_shard_state)
```
## Sharding Operations Metrics
### `controller_sharding_assignments_total`
Type: counter
Description: Total number of shard assignments by the sharder webhook per `ControllerRing` and GroupResource.
This counter is incremented every time the mutating webhook of the sharder assigns a sharded object (excluding dry-run requests).
### `controller_sharding_movements_total`
Type: counter
Description: Total number of shard movements triggered by the sharder controller per `ControllerRing` and GroupResource.
This counter is incremented every time the sharder controller triggers a direct object assignment, i.e., when an object needs to be moved away from an unavailable shard (or when an object has missed the webhook and needs to be assigned).
This only considers the sharder controller's side, i.e., the `controller_sharding_assignments_total` counter is incremented as well when the controller successfully triggers an assignment by the webhook.
### `controller_sharding_drains_total`
Type: counter
Description: Total number of shard drains triggered by the sharder controller per `ControllerRing` and GroupResource.
This counter is incremented every time the sharder controller triggers a drain operation, i.e., when an object needs to be moved away from an available shard.
This only considers the sharder controller's side, i.e., the `controller_sharding_assignments_total` counter is incremented as well when the shard removes the drain label as expected and thereby triggers an assignment by the webhook.
This doesn't consider the action taken by the shard.
### `controller_sharding_ring_calculations_total`
Type: counter
Description: Total number of hash ring calculations per `ControllerRing`.
This counter is incremented every time the sharder calculates a new consistent hash ring based on the shard leases.
## `ControllerRing` State Metrics
### `controller_sharding_controllerring_metadata_generation`
Type: gauge
Description: The generation of a `ControllerRing`.
### `controller_sharding_controllerring_observed_generation`
Type: gauge
Description: The latest generation observed by the `ControllerRing` controller.
### `controller_sharding_controllerring_status_shards`
Type: gauge
Description: The ControllerRing's total number of shards observed by the `ControllerRing` controller.
### `controller_sharding_controllerring_status_available_shards`
Type: gauge
Description: The `ControllerRing`'s number of available shards observed by the `ControllerRing` controller.
## Shard State Metrics
### `controller_sharding_shard_info`
Type: gauge
Description: Information about a shard.
### `controller_sharding_shard_state`
Type: stateset
Description: The shard's current state observed by the `shardlease` controller.
================================================
FILE: go.mod
================================================
module github.com/timebertt/kubernetes-controller-sharding
go 1.24.0
toolchain go1.25.7
require (
github.com/cespare/xxhash/v2 v2.3.0
github.com/evanphx/json-patch v5.9.11+incompatible
github.com/go-logr/logr v1.4.3
github.com/google/uuid v1.6.0
github.com/hashicorp/go-multierror v1.1.1
github.com/onsi/ginkgo/v2 v2.28.1
github.com/onsi/gomega v1.39.1
github.com/prometheus/client_golang v1.23.2
github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.10
go.uber.org/zap v1.27.1
gomodules.xyz/jsonpatch/v2 v2.5.0
k8s.io/api v0.34.4
k8s.io/apimachinery v0.34.4
k8s.io/client-go v0.34.4
k8s.io/code-generator v0.34.4
k8s.io/component-base v0.34.4
k8s.io/klog/v2 v2.130.1
k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2
sigs.k8s.io/controller-runtime v0.22.5
)
require (
cel.dev/expr v0.24.0 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.41.0 // indirect
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/grpc v1.72.1 // indirect
google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.34.3 // indirect
k8s.io/apiserver v0.34.3 // indirect
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
================================================
FILE: go.sum
================================================
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.34.4 h1:Z5hsoQcZ2yBjelb9j5JKzCVo9qv9XLkVm5llnqS4h+0=
k8s.io/api v0.34.4/go.mod h1:6SaGYuGPkMqqCgg8rPG/OQoCrhgSEV+wWn9v21fDP3o=
k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g=
k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0=
k8s.io/apimachinery v0.34.4 h1:C5SiSzLEMyWIk53sSbnk0WlOOyqv/MFnWvuc/d6M+xc=
k8s.io/apimachinery v0.34.4/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo=
k8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w=
k8s.io/client-go v0.34.4 h1:IXhvzFdm0e897kXtLbeyMpAGzontcShJ/gi/XCCsOLc=
k8s.io/client-go v0.34.4/go.mod h1:tXIVJTQabT5QRGlFdxZQFxrIhcGUPpKL5DAc4gSWTE8=
k8s.io/code-generator v0.34.4 h1:ri/HSQ1eCQ40pqTQ4HeEiC8UR/SaftH/syav9RL4b+c=
k8s.io/code-generator v0.34.4/go.mod h1:JbvI8dtG5KB5HJSFzExSbvigBSG8gCncyMtdwg/NVbw=
k8s.io/component-base v0.34.4 h1:jP4XqR48YelfXIlRpOHQgms5GebU23zSE6xcvTwpXDE=
k8s.io/component-base v0.34.4/go.mod h1:uujRfLNOwNiFWz47eBjNZEj/Swn2cdhqI7lW2MeFdrU=
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU=
k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.22.5 h1:v3nfSUMowX/2WMp27J9slwGFyAt7IV0YwBxAkrUr0GE=
sigs.k8s.io/controller-runtime v0.22.5/go.mod h1:pc5SoYWnWI6I+cBHYYdZ7B6YHZVY5xNfll88JB+vniI=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
================================================
FILE: go.work
================================================
go 1.24.0

toolchain go1.25.0

use (
	.
	./webhosting-operator
)
================================================
FILE: go.work.sum
================================================
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0=
github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b h1:ogbOPx86mIhFy764gGkqnkFC8m5PJA7sPzlk9ppLVQA=
github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-03
================================================
SYMBOL INDEX (649 symbols across 135 files)
================================================
FILE: cmd/checksum-controller/main.go
function main (line 46) | func main() {
type options (line 80) | type options struct
method AddFlags (line 99) | func (o *options) AddFlags(fs *pflag.FlagSet) {
method validate (line 110) | func (o *options) validate() error {
method run (line 118) | func (o *options) run(ctx context.Context) error {
function newOptions (line 88) | func newOptions() *options {
FILE: cmd/checksum-controller/reconciler.go
type Reconciler (line 45) | type Reconciler struct
method AddToManager (line 50) | func (r *Reconciler) AddToManager(mgr manager.Manager, controllerRingN...
method Reconcile (line 96) | func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Requ...
function SecretDataChanged (line 77) | func SecretDataChanged() predicate.Predicate {
function ObjectDeleted (line 86) | func ObjectDeleted() predicate.Predicate {
FILE: cmd/sharder/app/app.go
constant Name (line 41) | Name = "sharder"
function NewCommand (line 44) | func NewCommand() *cobra.Command {
function run (line 83) | func run(ctx context.Context, log logr.Logger, opts *options) error {
FILE: cmd/sharder/app/options.go
type options (line 54) | type options struct
method addFlags (line 71) | func (o *options) addFlags(fs *pflag.FlagSet) {
method complete (line 79) | func (o *options) complete() error {
method applyConfigToRESTConfig (line 120) | func (o *options) applyConfigToRESTConfig() {
method applyConfigToManagerOptions (line 131) | func (o *options) applyConfigToManagerOptions() {
method applyCacheOptions (line 172) | func (o *options) applyCacheOptions() {
method applyOptionsOverrides (line 192) | func (o *options) applyOptionsOverrides() error {
function newOptions (line 63) | func newOptions() *options {
function dropUnwantedMetadata (line 206) | func dropUnwantedMetadata(i interface{}) (interface{}, error) {
FILE: cmd/sharder/main.go
function main (line 28) | func main() {
FILE: pkg/apis/config/v1alpha1/defaults.go
function addDefaultingFuncs (line 33) | func addDefaultingFuncs(scheme *runtime.Scheme) error {
function SetDefaults_SharderConfig (line 37) | func SetDefaults_SharderConfig(obj *SharderConfig) {
function SetDefaults_LeaderElectionConfiguration (line 53) | func SetDefaults_LeaderElectionConfiguration(obj *componentbaseconfigv1a...
function SetDefaults_DebuggingConfiguration (line 68) | func SetDefaults_DebuggingConfiguration(obj *componentbaseconfigv1alpha1...
function SetDefaults_HealthEndpoint (line 76) | func SetDefaults_HealthEndpoint(obj *HealthEndpoint) {
function SetDefaults_MetricsEndpoint (line 82) | func SetDefaults_MetricsEndpoint(obj *MetricsEndpoint) {
function SetDefaults_Controller (line 88) | func SetDefaults_Controller(obj *Controller) {
function SetDefaults_SharderController (line 94) | func SetDefaults_SharderController(obj *SharderController) {
function SetDefaults_Webhook (line 103) | func SetDefaults_Webhook(obj *Webhook) {
function SetDefaults_WebhookConfig (line 113) | func SetDefaults_WebhookConfig(obj *WebhookConfig) {
function SetDefaults_WebhookClientConfig (line 129) | func SetDefaults_WebhookClientConfig(obj *admissionregistrationv1.Webhoo...
function SetDefaults_ServiceReference (line 135) | func SetDefaults_ServiceReference(obj *admissionregistrationv1.ServiceRe...
FILE: pkg/apis/config/v1alpha1/register.go
constant GroupName (line 28) | GroupName = "config.sharding.timebertt.dev"
function addKnownTypes (line 41) | func addKnownTypes(scheme *runtime.Scheme) error {
FILE: pkg/apis/config/v1alpha1/types.go
type SharderConfig (line 28) | type SharderConfig struct
type HealthEndpoint (line 58) | type HealthEndpoint struct
type MetricsEndpoint (line 68) | type MetricsEndpoint struct
type Controller (line 78) | type Controller struct
type SharderController (line 85) | type SharderController struct
type Webhook (line 98) | type Webhook struct
type WebhookServer (line 108) | type WebhookServer struct
type WebhookConfig (line 124) | type WebhookConfig struct
FILE: pkg/apis/config/v1alpha1/v1alpha1_suite_test.go
function TestV1alpha1 (line 26) | func TestV1alpha1(t *testing.T) {
FILE: pkg/apis/config/v1alpha1/zz_generated.deepcopy.go
method DeepCopyInto (line 31) | func (in *Controller) DeepCopyInto(out *Controller) {
method DeepCopy (line 41) | func (in *Controller) DeepCopy() *Controller {
method DeepCopyInto (line 51) | func (in *HealthEndpoint) DeepCopyInto(out *HealthEndpoint) {
method DeepCopy (line 56) | func (in *HealthEndpoint) DeepCopy() *HealthEndpoint {
method DeepCopyInto (line 66) | func (in *MetricsEndpoint) DeepCopyInto(out *MetricsEndpoint) {
method DeepCopy (line 71) | func (in *MetricsEndpoint) DeepCopy() *MetricsEndpoint {
method DeepCopyInto (line 81) | func (in *SharderConfig) DeepCopyInto(out *SharderConfig) {
method DeepCopy (line 111) | func (in *SharderConfig) DeepCopy() *SharderConfig {
method DeepCopyObject (line 121) | func (in *SharderConfig) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 129) | func (in *SharderController) DeepCopyInto(out *SharderController) {
method DeepCopy (line 144) | func (in *SharderController) DeepCopy() *SharderController {
method DeepCopyInto (line 154) | func (in *Webhook) DeepCopyInto(out *Webhook) {
method DeepCopy (line 169) | func (in *Webhook) DeepCopy() *Webhook {
method DeepCopyInto (line 179) | func (in *WebhookConfig) DeepCopyInto(out *WebhookConfig) {
method DeepCopy (line 201) | func (in *WebhookConfig) DeepCopy() *WebhookConfig {
method DeepCopyInto (line 211) | func (in *WebhookServer) DeepCopyInto(out *WebhookServer) {
method DeepCopy (line 231) | func (in *WebhookServer) DeepCopy() *WebhookServer {
FILE: pkg/apis/config/v1alpha1/zz_generated.defaults.go
function RegisterDefaults (line 31) | func RegisterDefaults(scheme *runtime.Scheme) error {
function SetObjectDefaults_SharderConfig (line 36) | func SetObjectDefaults_SharderConfig(in *SharderConfig) {
FILE: pkg/apis/sharding/v1alpha1/constants.go
constant NamespaceSystem (line 23) | NamespaceSystem = "sharding-system"
constant AppControllerSharding (line 26) | AppControllerSharding = "controller-sharding"
constant alphaPrefix (line 29) | alphaPrefix = "alpha.sharding.timebertt.dev/"
constant LabelControllerRing (line 32) | LabelControllerRing = alphaPrefix + "controllerring"
constant LabelState (line 35) | LabelState = alphaPrefix + "state"
constant LabelShardPrefix (line 38) | LabelShardPrefix = "shard." + alphaPrefix
constant LabelDrainPrefix (line 42) | LabelDrainPrefix = "drain." + alphaPrefix
constant IdentityShardLeaseController (line 46) | IdentityShardLeaseController = "shardlease-controller"
function LabelShard (line 50) | func LabelShard(ringName string) string {
function LabelDrain (line 56) | func LabelDrain(ringName string) string {
FILE: pkg/apis/sharding/v1alpha1/register.go
constant GroupName (line 29) | GroupName = "sharding.timebertt.dev"
function addKnownTypes (line 42) | func addKnownTypes(scheme *runtime.Scheme) error {
FILE: pkg/apis/sharding/v1alpha1/types_controllerring.go
type ControllerRing (line 36) | type ControllerRing struct
method LeaseSelector (line 119) | func (c *ControllerRing) LeaseSelector() labels.Selector {
method LabelShard (line 124) | func (c *ControllerRing) LabelShard() string {
method LabelDrain (line 130) | func (c *ControllerRing) LabelDrain() string {
type ControllerRingList (line 52) | type ControllerRingList struct
type ControllerRingSpec (line 62) | type ControllerRingSpec struct
type RingResource (line 79) | type RingResource struct
constant ControllerRingReady (line 98) | ControllerRingReady = "Ready"
type ControllerRingStatus (line 102) | type ControllerRingStatus struct
FILE: pkg/apis/sharding/v1alpha1/v1alpha1_suite_test.go
function TestV1alpha1 (line 26) | func TestV1alpha1(t *testing.T) {
FILE: pkg/apis/sharding/v1alpha1/zz_generated.deepcopy.go
method DeepCopyInto (line 29) | func (in *ControllerRing) DeepCopyInto(out *ControllerRing) {
method DeepCopy (line 38) | func (in *ControllerRing) DeepCopy() *ControllerRing {
method DeepCopyObject (line 48) | func (in *ControllerRing) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 56) | func (in *ControllerRingList) DeepCopyInto(out *ControllerRingList) {
method DeepCopy (line 70) | func (in *ControllerRingList) DeepCopy() *ControllerRingList {
method DeepCopyObject (line 80) | func (in *ControllerRingList) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 88) | func (in *ControllerRingSpec) DeepCopyInto(out *ControllerRingSpec) {
method DeepCopy (line 105) | func (in *ControllerRingSpec) DeepCopy() *ControllerRingSpec {
method DeepCopyInto (line 115) | func (in *ControllerRingStatus) DeepCopyInto(out *ControllerRingStatus) {
method DeepCopy (line 127) | func (in *ControllerRingStatus) DeepCopy() *ControllerRingStatus {
method DeepCopyInto (line 137) | func (in *RingResource) DeepCopyInto(out *RingResource) {
method DeepCopy (line 148) | func (in *RingResource) DeepCopy() *RingResource {
FILE: pkg/controller/add.go
function AddToManager (line 32) | func AddToManager(ctx context.Context, mgr manager.Manager, config *conf...
FILE: pkg/controller/controllerring/add.go
constant ControllerName (line 36) | ControllerName = "controllerring"
method AddToManager (line 39) | func (r *Reconciler) AddToManager(mgr manager.Manager) error {
method LeasePredicate (line 64) | func (r *Reconciler) LeasePredicate() predicate.Predicate {
FILE: pkg/controller/controllerring/controllerring_suite_test.go
function TestControllerRing (line 26) | func TestControllerRing(t *testing.T) {
FILE: pkg/controller/controllerring/reconciler.go
type Reconciler (line 54) | type Reconciler struct
method Reconcile (line 62) | func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Requ...
method updateStatusSuccess (line 95) | func (r *Reconciler) updateStatusSuccess(ctx context.Context, controll...
method updateStatusError (line 106) | func (r *Reconciler) updateStatusError(ctx context.Context, log logr.L...
method OptionallyUpdateStatus (line 124) | func (r *Reconciler) OptionallyUpdateStatus(ctx context.Context, contr...
method reconcileWebhooks (line 142) | func (r *Reconciler) reconcileWebhooks(ctx context.Context, controller...
method WebhookConfigForControllerRing (line 151) | func (r *Reconciler) WebhookConfigForControllerRing(controllerRing *sh...
function WebhookConfigForControllerRing (line 160) | func WebhookConfigForControllerRing(controllerRing *shardingv1alpha1.Con...
function WebhookForControllerRing (line 178) | func WebhookForControllerRing(controllerRing *shardingv1alpha1.Controlle...
function RuleForResource (line 231) | func RuleForResource(gr metav1.GroupResource) admissionregistrationv1.Ru...
FILE: pkg/controller/sharder/add.go
constant ControllerName (line 38) | ControllerName = "sharder"
method AddToManager (line 41) | func (r *Reconciler) AddToManager(mgr manager.Manager) error {
method LeasePredicate (line 71) | func (r *Reconciler) LeasePredicate() predicate.Predicate {
FILE: pkg/controller/sharder/reconciler.go
type Reconciler (line 62) | type Reconciler struct
method Reconcile (line 70) | func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Requ...
method NewOperation (line 100) | func (r *Reconciler) NewOperation(ctx context.Context, controllerRing ...
method GetSelectedNamespaces (line 126) | func (r *Reconciler) GetSelectedNamespaces(ctx context.Context, contro...
type Operation (line 150) | type Operation struct
method ResyncControllerRing (line 171) | func (o *Operation) ResyncControllerRing(ctx context.Context, log logr...
method compileWorkItemsForRing (line 208) | func (o *Operation) compileWorkItemsForRing(ctx context.Context, work ...
method compileWorkItemsForResource (line 219) | func (o *Operation) compileWorkItemsForResource(
method workItemForObject (line 271) | func (o *Operation) workItemForObject(
method processNextWorkItem (line 325) | func (o *Operation) processNextWorkItem(
method drainObject (line 360) | func (o *Operation) drainObject(
method moveObject (line 384) | func (o *Operation) moveObject(
type workItem (line 162) | type workItem struct
FILE: pkg/controller/sharder/reconciler_test.go
function newNamespace (line 173) | func newNamespace(name string) *corev1.Namespace {
function newLease (line 180) | func newLease() *coordinationv1.Lease {
FILE: pkg/controller/sharder/sharder_suite_test.go
function TestSharder (line 26) | func TestSharder(t *testing.T) {
FILE: pkg/controller/shardlease/add.go
constant ControllerName (line 37) | ControllerName = "shardlease"
method AddToManager (line 40) | func (r *Reconciler) AddToManager(mgr manager.Manager) error {
method LeasePredicate (line 69) | func (r *Reconciler) LeasePredicate() predicate.Predicate {
FILE: pkg/controller/shardlease/reconciler.go
type Reconciler (line 40) | type Reconciler struct
method Reconcile (line 46) | func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Requ...
FILE: pkg/controller/shardlease/shardlease_suite_test.go
function TestShardLease (line 26) | func TestShardLease(t *testing.T) {
FILE: pkg/metrics/add.go
constant Namespace (line 27) | Namespace = "controller_sharding"
function AddToManager (line 30) | func AddToManager(mgr manager.Manager) error {
FILE: pkg/metrics/exporter/exporter.go
type Exporter (line 35) | type Exporter struct
type Metric (line 52) | type Metric struct
type GenerateFunc (line 65) | type GenerateFunc
method AddToManager (line 68) | func (e *Exporter[O, L]) AddToManager(mgr manager.Manager) error {
method NeedLeaderElection (line 87) | func (e *Exporter[O, L]) NeedLeaderElection() bool {
method Start (line 93) | func (e *Exporter[O, L]) Start(_ context.Context) error {
method Describe (line 101) | func (e *Exporter[O, L]) Describe(ch chan<- *prometheus.Desc) {
method Collect (line 107) | func (e *Exporter[O, L]) Collect(ch chan<- prometheus.Metric) {
method handleError (line 132) | func (e *Exporter[O, L]) handleError(ch chan<- prometheus.Metric, err er...
function GenerateStateSet (line 142) | func GenerateStateSet[O client.Object](knownStates []string, unknownStat...
function stateSetMetric (line 164) | func stateSetMetric(desc *prometheus.Desc, state string, value bool, sta...
function KnownStates (line 174) | func KnownStates[T ~string](s []T) []string {
function KnownStatesStringer (line 183) | func KnownStatesStringer[T fmt.Stringer](s []T) []string {
FILE: pkg/shard/controller/builder.go
type Builder (line 30) | type Builder struct
method For (line 47) | func (b *Builder) For(object client.Object) *Builder {
method WithClient (line 58) | func (b *Builder) WithClient(c client.Client) *Builder {
method InControllerRing (line 64) | func (b *Builder) InControllerRing(name string) *Builder {
method WithShardName (line 70) | func (b *Builder) WithShardName(name string) *Builder {
method MustBuild (line 76) | func (b *Builder) MustBuild(r reconcile.Reconciler) reconcile.Reconcil...
method Build (line 83) | func (b *Builder) Build(r reconcile.Reconciler) (reconcile.Reconciler,...
function NewShardedReconciler (line 42) | func NewShardedReconciler(mgr manager.Manager) *Builder {
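The Builder's fluent methods suggest the intended wiring: wrap an existing reconciler so it only acts on objects assigned to this shard. A sketch based on the signatures listed above; the import path is assumed from the repository layout, and the ring name, shard name, and `wrapSharded` helper are placeholders:

```go
package shard

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	shardcontroller "github.com/timebertt/kubernetes-controller-sharding/pkg/shard/controller"
)

// wrapSharded decorates an existing reconciler with shard awareness
// (a sketch; MustBuild panics on configuration errors, Build returns them).
func wrapSharded(mgr manager.Manager, actual reconcile.Reconciler, shardName string) reconcile.Reconciler {
	return shardcontroller.NewShardedReconciler(mgr).
		For(&corev1.ConfigMap{}).              // object type this controller reconciles
		InControllerRing("my-controllerring"). // hypothetical ControllerRing name
		WithShardName(shardName).              // identity of this shard instance
		MustBuild(actual)
}
```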
FILE: pkg/shard/controller/builder_test.go
type fakeManager (line 147) | type fakeManager struct
method GetClient (line 152) | func (f fakeManager) GetClient() client.Client {
FILE: pkg/shard/controller/controller_suite_test.go
function TestController (line 26) | func TestController(t *testing.T) {
FILE: pkg/shard/controller/predicate.go
function Predicate (line 33) | func Predicate(controllerRingName, shardName string, predicates ...predi...
function isAssigned (line 42) | func isAssigned(controllerRingName, shardName string) predicate.Predicate {
function isDrained (line 48) | func isDrained(controllerRingName string) predicate.Predicate {
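Given the label helpers in pkg/apis/sharding/v1alpha1, the two unexported predicates plausibly check the assignment and drain labels. A self-contained sketch using controller-runtime's `predicate.NewPredicateFuncs`, with the label keys inlined (the real package presumably reuses the API constants):

```go
package shard

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// isAssigned sketches the filter for objects labeled for this shard.
func isAssigned(ringName, shardName string) predicate.Predicate {
	return predicate.NewPredicateFuncs(func(obj client.Object) bool {
		return obj.GetLabels()["shard.alpha.sharding.timebertt.dev/"+ringName] == shardName
	})
}

// isDrained sketches the filter for objects carrying the drain label,
// i.e. objects the shard should hand back instead of reconciling.
func isDrained(ringName string) predicate.Predicate {
	return predicate.NewPredicateFuncs(func(obj client.Object) bool {
		_, drain := obj.GetLabels()["drain.alpha.sharding.timebertt.dev/"+ringName]
		return drain
	})
}
```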
FILE: pkg/shard/controller/reconciler.go
type Reconciler (line 35) | type Reconciler struct
method Reconcile (line 53) | func (r *Reconciler) Reconcile(ctx context.Context, request reconcile....
FILE: pkg/shard/lease/lease.go
type Options (line 37) | type Options struct
function NewResourceLock (line 52) | func NewResourceLock(config *rest.Config, options Options) (resourcelock...
type LeaseLock (line 101) | type LeaseLock struct
method Get (line 112) | func (ll *LeaseLock) Get(ctx context.Context) (*resourcelock.LeaderEle...
method Create (line 127) | func (ll *LeaseLock) Create(ctx context.Context, ler resourcelock.Lead...
method Update (line 141) | func (ll *LeaseLock) Update(ctx context.Context, ler resourcelock.Lead...
method RecordEvent (line 159) | func (ll *LeaseLock) RecordEvent(string) {}
method Describe (line 163) | func (ll *LeaseLock) Describe() string {
method Identity (line 168) | func (ll *LeaseLock) Identity() string {
function mergeLabels (line 172) | func mergeLabels(obj *metav1.ObjectMeta, labels map[string]string) {
constant inClusterNamespacePath (line 181) | inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/n...
function getInClusterNamespace (line 186) | func getInClusterNamespace() (string, error) {
FILE: pkg/shard/lease/lease_suite_test.go
function TestLease (line 26) | func TestLease(t *testing.T) {
FILE: pkg/sharding/consistenthash/benchmark_test.go
function TestDistribution (line 25) | func TestDistribution(t *testing.T) {
function generateHostnames (line 52) | func generateHostnames(n int) []string {
function benchmarkRing (line 61) | func benchmarkRing(nodes int, tokensPerNode int, b *testing.B) {
function BenchmarkRing3_100 (line 71) | func BenchmarkRing3_100(b *testing.B) { benchmarkRing(3, 100, b) }
function BenchmarkRing3_1000 (line 72) | func BenchmarkRing3_1000(b *testing.B) { benchmarkRing(3, 1000, b) }
function BenchmarkRing5_100 (line 73) | func BenchmarkRing5_100(b *testing.B) { benchmarkRing(5, 100, b) }
function BenchmarkRing5_1000 (line 74) | func BenchmarkRing5_1000(b *testing.B) { benchmarkRing(5, 1000, b) }
function BenchmarkRing10_100 (line 75) | func BenchmarkRing10_100(b *testing.B) { benchmarkRing(10, 100, b) }
function BenchmarkRing10_1000 (line 76) | func BenchmarkRing10_1000(b *testing.B) { benchmarkRing(10, 1000, b) }
FILE: pkg/sharding/consistenthash/consistenthash_suite_test.go
function TestConsistentHash (line 26) | func TestConsistentHash(t *testing.T) {
FILE: pkg/sharding/consistenthash/ring.go
type Hash (line 27) | type Hash
constant DefaultTokensPerNode (line 33) | DefaultTokensPerNode = 100
function New (line 39) | func New(hash Hash, tokensPerNode int, initialNodes ...string) *Ring {
type Ring (line 62) | type Ring struct
method IsEmpty (line 71) | func (r *Ring) IsEmpty() bool {
method AddNodes (line 76) | func (r *Ring) AddNodes(nodes ...string) {
method Hash (line 90) | func (r *Ring) Hash(key string) string {
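The Ring API above (New, AddNodes, Hash) is a classic consistent-hash ring with virtual nodes ("tokens"). A self-contained sketch of the technique, not the repository's implementation — the FNV hash and the internals here are assumptions:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// hash64 maps a key to a position on the ring; the package's Hash type is
// assumed to be a comparable hash-function signature.
func hash64(data []byte) uint64 {
	h := fnv.New64a()
	_, _ = h.Write(data)
	return h.Sum64()
}

// ring places tokensPerNode virtual nodes per shard on a 64-bit circle.
type ring struct {
	tokensPerNode int
	tokens        []uint64          // sorted virtual-node positions
	nodeByToken   map[uint64]string // token -> owning node
}

func newRing(tokensPerNode int, nodes ...string) *ring {
	r := &ring{tokensPerNode: tokensPerNode, nodeByToken: map[uint64]string{}}
	r.addNodes(nodes...)
	return r
}

func (r *ring) addNodes(nodes ...string) {
	for _, node := range nodes {
		for i := 0; i < r.tokensPerNode; i++ {
			t := hash64([]byte(fmt.Sprintf("%s-%d", node, i)))
			r.nodeByToken[t] = node
			r.tokens = append(r.tokens, t)
		}
	}
	sort.Slice(r.tokens, func(i, j int) bool { return r.tokens[i] < r.tokens[j] })
}

// hash returns the node owning the first token at or after the key's position.
func (r *ring) hash(key string) string {
	if len(r.tokens) == 0 {
		return ""
	}
	h := hash64([]byte(key))
	i := sort.Search(len(r.tokens), func(i int) bool { return r.tokens[i] >= h })
	if i == len(r.tokens) { // wrap around the circle
		i = 0
	}
	return r.nodeByToken[r.tokens[i]]
}

func main() {
	r := newRing(100, "shard-a", "shard-b", "shard-c")
	fmt.Println(r.hash("core/ConfigMap/default/my-configmap")) // deterministic assignment
}
```

More tokens per node (DefaultTokensPerNode is 100) smooth the key distribution across shards at the cost of memory and lookup time, and adding a node only remaps the keys falling into its new token ranges — the property the benchmark and distribution tests above exercise.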
FILE: pkg/sharding/handler/controllerring.go
function MapControllerRingToLeases (line 34) | func MapControllerRingToLeases(reader client.Reader) handler.MapFunc {
FILE: pkg/sharding/handler/handler_suite_test.go
function TestHandler (line 26) | func TestHandler(t *testing.T) {
FILE: pkg/sharding/handler/lease.go
function MapLeaseToControllerRing (line 29) | func MapLeaseToControllerRing(_ context.Context, obj client.Object) []re...
FILE: pkg/sharding/key/key.go
function FuncForResource (line 32) | func FuncForResource(gr metav1.GroupResource, ring *shardingv1alpha1.Con...
type Func (line 57) | type Func
function ForObject (line 61) | func ForObject(obj client.Object) (string, error) {
function ForController (line 99) | func ForController(obj client.Object) (string, error) {
function forMetadata (line 123) | func forMetadata(group, kind, namespace, name string) string {
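ForObject keys an object off its own metadata, while ForController keys it off its controller reference, so owned objects hash to the same shard as their owner. A merged sketch of that rule; the key format produced by `forMetadata` is an assumption (determinism is what matters), and the real functions also return errors:

```go
package key

import (
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// forMetadata builds a deterministic hash key from identifying metadata.
func forMetadata(group, kind, namespace, name string) string {
	return strings.Join([]string{group, kind, namespace, name}, "/")
}

// forObject sketches the owner-follows rule: objects with a controller
// reference reuse the owner's key so both land on the same shard.
func forObject(obj client.Object) string {
	if ref := metav1.GetControllerOf(obj); ref != nil {
		gv, _ := schema.ParseGroupVersion(ref.APIVersion)
		return forMetadata(gv.Group, ref.Kind, obj.GetNamespace(), ref.Name)
	}
	gvk := obj.GetObjectKind().GroupVersionKind()
	return forMetadata(gvk.Group, gvk.Kind, obj.GetNamespace(), obj.GetName())
}
```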
FILE: pkg/sharding/key/key_suite_test.go
function TestKey (line 26) | func TestKey(t *testing.T) {
FILE: pkg/sharding/leases/leases_suite_test.go
function TestLeases (line 26) | func TestLeases(t *testing.T) {
FILE: pkg/sharding/leases/shards.go
type Shard (line 26) | type Shard struct
type Shards (line 38) | type Shards
method ByID (line 41) | func (s Shards) ByID(id string) Shard {
method AvailableShards (line 52) | func (s Shards) AvailableShards() Shards {
method IDs (line 64) | func (s Shards) IDs() []string {
function ToShards (line 74) | func ToShards(leases []coordinationv1.Lease, now time.Time) Shards {
function ToShard (line 83) | func ToShard(lease *coordinationv1.Lease, now time.Time) Shard {
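Shards is evidently a slice type with filter helpers. A sketch under that assumption — ShardState is stubbed here, since the real derivation from lease times lives in state.go (next file):

```go
package leases

// ShardState is stubbed for this sketch; the real package derives it from
// lease times and defines which states count as available.
type ShardState int

const Ready ShardState = 5 // matches the iota order in state.go: Unknown..Ready

func (s ShardState) IsAvailable() bool { return s == Ready } // simplified stub

type Shard struct {
	ID    string
	State ShardState
}

type Shards []Shard

// AvailableShards keeps only shards whose lease state counts as available.
func (s Shards) AvailableShards() Shards {
	out := make(Shards, 0, len(s))
	for _, shard := range s {
		if shard.State.IsAvailable() {
			out = append(out, shard)
		}
	}
	return out
}

// IDs returns the shard names, e.g. for seeding the consistent-hash ring.
func (s Shards) IDs() []string {
	ids := make([]string, 0, len(s))
	for _, shard := range s {
		ids = append(ids, shard.ID)
	}
	return ids
}
```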
FILE: pkg/sharding/leases/state.go
type ShardState (line 27) | type ShardState
method String (line 49) | func (s ShardState) String() string {
method IsAvailable (line 85) | func (s ShardState) IsAvailable() bool {
constant Unknown (line 31) | Unknown ShardState = iota
constant Orphaned (line 33) | Orphaned
constant Dead (line 36) | Dead
constant Uncertain (line 38) | Uncertain
constant Expired (line 40) | Expired
constant Ready (line 42) | Ready
function StateFromString (line 67) | func StateFromString(state string) ShardState {
function ToState (line 90) | func ToState(lease *coordinationv1.Lease, now time.Time) ShardState {
function toState (line 94) | func toState(lease *coordinationv1.Lease, t Times) ShardState {
FILE: pkg/sharding/leases/times.go
constant defaultLeaseDuration (line 26) | defaultLeaseDuration = 15 * time.Second
constant leaseTTL (line 27) | leaseTTL = time.Minute
type Times (line 31) | type Times struct
function ToTimes (line 47) | func ToTimes(lease *coordinationv1.Lease, now time.Time) Times {
FILE: pkg/sharding/leases/times_test.go
function matchTimes (line 118) | func matchTimes(actual, expected Times) {
FILE: pkg/sharding/predicate/controllerring.go
function ControllerRingCreatedOrUpdated (line 26) | func ControllerRingCreatedOrUpdated() predicate.Predicate {
FILE: pkg/sharding/predicate/lease.go
function IsShardLease (line 31) | func IsShardLease() predicate.Predicate {
function ShardLeaseStateChanged (line 43) | func ShardLeaseStateChanged(clock clock.PassiveClock) predicate.Predicate {
function ShardLeaseAvailabilityChanged (line 63) | func ShardLeaseAvailabilityChanged(clock clock.PassiveClock) predicate.P...
FILE: pkg/sharding/predicate/predicate_suite_test.go
function TestPredicate (line 26) | func TestPredicate(t *testing.T) {
FILE: pkg/sharding/ring/ring.go
function FromLeases (line 35) | func FromLeases(controllerRing *shardingv1alpha1.ControllerRing, leaseLi...
FILE: pkg/sharding/ring/ring_suite_test.go
function TestRing (line 26) | func TestRing(t *testing.T) {
FILE: pkg/sharding/ring/ring_test.go
function probeRingNodes (line 88) | func probeRingNodes(ring *consistenthash.Ring) []string {
FILE: pkg/utils/client/client_suite_test.go
function TestClient (line 26) | func TestClient(t *testing.T) {
FILE: pkg/utils/client/options.go
type ResourceVersion (line 28) | type ResourceVersion
method ApplyToGet (line 30) | func (r ResourceVersion) ApplyToGet(opts *client.GetOptions) {
method ApplyToList (line 37) | func (r ResourceVersion) ApplyToList(opts *client.ListOptions) {
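A type implementing both ApplyToGet and ApplyToList is a reusable controller-runtime client option. A plausible sketch: it sets the resourceVersion on reads, e.g. `ResourceVersion("0")` to allow serving from the API server's watch cache instead of forcing a quorum read (whether that is the repository's exact intent is an assumption):

```go
package utilclient

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ResourceVersion is a client.GetOption and client.ListOption that sets the
// resourceVersion on the underlying request.
type ResourceVersion string

func (r ResourceVersion) ApplyToGet(opts *client.GetOptions) {
	if opts.Raw == nil {
		opts.Raw = &metav1.GetOptions{}
	}
	opts.Raw.ResourceVersion = string(r)
}

func (r ResourceVersion) ApplyToList(opts *client.ListOptions) {
	if opts.Raw == nil {
		opts.Raw = &metav1.ListOptions{}
	}
	opts.Raw.ResourceVersion = string(r)
}
```

Usage would then read `c.List(ctx, list, utilclient.ResourceVersion("0"))`.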
FILE: pkg/utils/client/scheme.go
function init (line 31) | func init() {
FILE: pkg/utils/errors/errors_suite_test.go
function TestErrors (line 26) | func TestErrors(t *testing.T) {
FILE: pkg/utils/errors/multi.go
function FormatErrors (line 26) | func FormatErrors(es []error) string {
FILE: pkg/utils/healthz/cache.go
type cacheSyncWaiter (line 28) | type cacheSyncWaiter interface
function CacheSync (line 33) | func CacheSync(cacheSyncWaiter cacheSyncWaiter) healthz.Checker {
FILE: pkg/utils/healthz/cache_test.go
type fakeSyncWaiter (line 39) | type fakeSyncWaiter
method WaitForCacheSync (line 41) | func (f fakeSyncWaiter) WaitForCacheSync(_ context.Context) bool {
FILE: pkg/utils/healthz/healthz_suite_test.go
function TestHealthz (line 26) | func TestHealthz(t *testing.T) {
FILE: pkg/utils/pager/pager.go
constant defaultPageSize (line 31) | defaultPageSize = 500
constant defaultPageBufferSize (line 32) | defaultPageBufferSize = 10
type lister (line 36) | type lister interface
function New (line 41) | func New(reader lister) *ListPager {
type ListPager (line 55) | type ListPager struct
method EachListItem (line 78) | func (p *ListPager) EachListItem(ctx context.Context, list client.Obje...
method EachListItemWithAlloc (line 90) | func (p *ListPager) EachListItemWithAlloc(ctx context.Context, list cl...
method eachListChunkBuffered (line 106) | func (p *ListPager) eachListChunkBuffered(ctx context.Context, list cl...
method eachListChunk (line 159) | func (p *ListPager) eachListChunk(ctx context.Context, list client.Obj...
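EachListItem mirrors client-go's pager: request one chunk at a time and follow the continue token until the collection is exhausted. A minimal sketch of that loop (the buffered variant and the page/buffer sizes above add concurrency on top; the real method signatures differ slightly):

```go
package pager

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// eachListItem lists pageSize objects at a time and invokes fn for each item,
// keeping only one chunk in memory at once.
func eachListItem(ctx context.Context, r client.Reader, list client.ObjectList, pageSize int64, fn func(runtime.Object) error) error {
	for {
		if err := r.List(ctx, list, client.Limit(pageSize), client.Continue(list.GetContinue())); err != nil {
			return err
		}
		items, err := meta.ExtractList(list)
		if err != nil {
			return err
		}
		for _, item := range items {
			if err := fn(item); err != nil {
				return err
			}
		}
		if list.GetContinue() == "" { // no more chunks
			return nil
		}
	}
}
```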
FILE: pkg/utils/pager/pager_suite_test.go
function TestPager (line 26) | func TestPager(t *testing.T) {
FILE: pkg/utils/pager/pager_test.go
type lister (line 200) | type lister struct
method List (line 208) | func (l *lister) List(_ context.Context, list client.ObjectList, opts ...
method getCalls (line 263) | func (l *lister) getCalls() int {
function podSlice (line 269) | func podSlice(offset, n int64) []corev1.Pod {
function havePods (line 279) | func havePods(i, j int) gomegatypes.GomegaMatcher {
FILE: pkg/utils/strings.go
function CapitalizeFirst (line 24) | func CapitalizeFirst(in string) string {
FILE: pkg/utils/test/envtest.go
function UseExistingCluster (line 25) | func UseExistingCluster() bool {
FILE: pkg/utils/test/matchers/condition.go
function OfType (line 36) | func OfType(conditionType string) gomegatypes.GomegaMatcher {
function WithStatus (line 41) | func WithStatus(status metav1.ConditionStatus) gomegatypes.GomegaMatcher {
function WithReason (line 46) | func WithReason(reason string) gomegatypes.GomegaMatcher {
function WithMessage (line 51) | func WithMessage(message string) gomegatypes.GomegaMatcher {
FILE: pkg/utils/test/matchers/errors.go
type errorMatcher (line 25) | type errorMatcher struct
method Match (line 30) | func (k *errorMatcher) Match(actual interface{}) (success bool, err er...
method FailureMessage (line 43) | func (k *errorMatcher) FailureMessage(actual interface{}) (message str...
method NegatedFailureMessage (line 46) | func (k *errorMatcher) NegatedFailureMessage(actual interface{}) (mess...
FILE: pkg/utils/test/matchers/matchers.go
function BeNotFoundError (line 29) | func BeNotFoundError() gomegatypes.GomegaMatcher {
function BeFunc (line 37) | func BeFunc(expected any) gomegatypes.GomegaMatcher {
FILE: pkg/utils/test/matchers/object.go
function HaveName (line 25) | func HaveName(name string) gomegatypes.GomegaMatcher {
function HaveNames (line 30) | func HaveNames(names ...string) gomegatypes.GomegaMatcher {
function HaveLabel (line 40) | func HaveLabel(key any) gomegatypes.GomegaMatcher {
function HaveLabelWithValue (line 45) | func HaveLabelWithValue(key, value any) gomegatypes.GomegaMatcher {
FILE: pkg/utils/test/object.go
function RandomSuffix (line 27) | func RandomSuffix() string {
FILE: pkg/utils/test/paths.go
function RepoRoot (line 27) | func RepoRoot() string {
function PathShardingCRDs (line 49) | func PathShardingCRDs() string {
FILE: pkg/utils/utils_suite_test.go
function TestUtils (line 26) | func TestUtils(t *testing.T) {
FILE: pkg/webhook/add.go
function AddToManager (line 30) | func AddToManager(ctx context.Context, mgr manager.Manager, config *conf...
FILE: pkg/webhook/sharder/add.go
constant webhookPathPrefix (line 35) | webhookPathPrefix = "/webhooks/sharder/controllerring/"
method AddToManager (line 38) | func (h *Handler) AddToManager(mgr manager.Manager) error {
function WebhookPathForControllerRing (line 58) | func WebhookPathForControllerRing(ring *shardingv1alpha1.ControllerRing)...
function ControllerRingForWebhookPath (line 64) | func ControllerRingForWebhookPath(requestPath string) (*shardingv1alpha1...
type ctxKey (line 77) | type ctxKey
constant keyRequestPath (line 80) | keyRequestPath ctxKey = 0
function NewContextWithRequestPath (line 83) | func NewContextWithRequestPath(ctx context.Context, r *http.Request) con...
function RequestPathFromContext (line 88) | func RequestPathFromContext(ctx context.Context) (string, error) {
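The prefix constant above implies a per-ring webhook path, so the two mapping functions are likely simple prefix operations. A sketch returning the ring name rather than a full object (the validation details are assumptions):

```go
package sharder

import (
	"fmt"
	"strings"
)

const webhookPathPrefix = "/webhooks/sharder/controllerring/"

// webhookPathForControllerRing sketches the forward mapping: each
// ControllerRing gets its own webhook path under the shared prefix.
func webhookPathForControllerRing(ringName string) string {
	return webhookPathPrefix + ringName
}

// controllerRingForWebhookPath sketches the reverse mapping the handler uses
// to determine which ring an admission request belongs to.
func controllerRingForWebhookPath(requestPath string) (string, error) {
	name := strings.TrimPrefix(requestPath, webhookPathPrefix)
	if name == requestPath || name == "" || strings.Contains(name, "/") {
		return "", fmt.Errorf("unexpected webhook request path %q", requestPath)
	}
	return name, nil
}
```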
FILE: pkg/webhook/sharder/handler.go
type Handler (line 41) | type Handler struct
method Handle (line 48) | func (h *Handler) Handle(ctx context.Context, req admission.Request) a...
function ControllerRingForRequest (line 117) | func ControllerRingForRequest(ctx context.Context, c client.Reader) (*sh...
FILE: pkg/webhook/sharder/handler_test.go
function newRequest (line 272) | func newRequest(obj client.Object) admission.Request {
function beAllowed (line 297) | func beAllowed(message any) gomegatypes.GomegaMatcher {
function bePatched (line 307) | func bePatched() gomegatypes.GomegaMatcher {
function beErrored (line 317) | func beErrored(message any) gomegatypes.GomegaMatcher {
function applyPatches (line 327) | func applyPatches[T client.Object](obj T, patches []jsonpatchtypes.Opera...
type fakeMetrics (line 348) | type fakeMetrics struct
method ObserveAssignment (line 352) | func (f *fakeMetrics) ObserveAssignment(string, metav1.GroupResource) {
FILE: pkg/webhook/sharder/metrics.go
type Metrics (line 25) | type Metrics interface
type realMetrics (line 29) | type realMetrics struct
method ObserveAssignment (line 31) | func (realMetrics) ObserveAssignment(controllerRingName string, gr met...
FILE: pkg/webhook/sharder/sharder_suite_test.go
function TestSharder (line 26) | func TestSharder(t *testing.T) {
FILE: test/e2e/checksum_controller_test.go
constant checksumControllerName (line 48) | checksumControllerName = "checksum-controller"
constant namePrefixChecksums (line 49) | namePrefixChecksums = "checksums-"
constant objectCount (line 51) | objectCount = 100
function describeScaleController (line 222) | func describeScaleController(text string, replicas int32) {
function newSecret (line 239) | func newSecret(name string) *corev1.Secret {
function itDeploymentShouldBeAvailable (line 249) | func itDeploymentShouldBeAvailable(deployment **appsv1.Deployment, expec...
function itControllerRingShouldBeReady (line 266) | func itControllerRingShouldBeReady() {
function itControllerRingShouldHaveAvailableShards (line 283) | func itControllerRingShouldHaveAvailableShards(expectedAvailableShards i...
function itShouldRecognizeReadyShardLeases (line 301) | func itShouldRecognizeReadyShardLeases(expectedCount int) {
function itCreateShardLease (line 318) | func itCreateShardLease(lease **coordinationv1.Lease) {
function itShouldReportShardLeaseState (line 330) | func itShouldReportShardLeaseState(lease **coordinationv1.Lease, state l...
function itCreateSecrets (line 340) | func itCreateSecrets() {
function itScaleController (line 351) | func itScaleController(replicas int32) {
function scaleController (line 359) | func scaleController(ctx context.Context, replicas int32) {
function itShouldAssignObjectsToAvailableShards (line 369) | func itShouldAssignObjectsToAvailableShards() {
function eventuallyShouldAssignObjectsToAvailableShards (line 381) | func eventuallyShouldAssignObjectsToAvailableShards(ctx context.Context,...
function newLease (line 400) | func newLease(leaseDurationSeconds int32) *coordinationv1.Lease {
function toMapOfConfigMap (line 418) | func toMapOfConfigMap(configMaps []corev1.ConfigMap) map[string]*corev1....
FILE: test/e2e/e2e_suite_test.go
function TestE2E (line 47) | func TestE2E(t *testing.T) {
constant testID (line 53) | testID = "e2e-controller-sharding"
constant ShortTimeout (line 55) | ShortTimeout = 10 * time.Second
constant MediumTimeout (line 56) | MediumTimeout = time.Minute
FILE: test/integration/shard/controller/controller_suite_test.go
function TestController (line 42) | func TestController(t *testing.T) {
constant testID (line 47) | testID = "shard-controller-test"
FILE: test/integration/shard/controller/controller_test.go
function itShouldReconcile (line 88) | func itShouldReconcile() {
function itShouldNotReconcile (line 96) | func itShouldNotReconcile() {
function itShouldDrain (line 104) | func itShouldDrain() {
function itShouldNotDrain (line 114) | func itShouldNotDrain() {
function itAddDrainLabel (line 129) | func itAddDrainLabel() {
function itShouldRemoveShardingLabels (line 137) | func itShouldRemoveShardingLabels() {
function haveStatus (line 148) | func haveStatus(status string) gomegatypes.GomegaMatcher {
function ignoreObject (line 152) | func ignoreObject() {
function assignThisShard (line 156) | func assignThisShard() {
function assignOtherShard (line 160) | func assignOtherShard() {
function drainObject (line 164) | func drainObject() {
FILE: test/integration/shard/controller/reconciler.go
constant LabelKey (line 38) | LabelKey = "status"
constant LabelValuePending (line 39) | LabelValuePending = "pending"
constant LabelValueDone (line 40) | LabelValueDone = "done"
constant LabelValueIgnore (line 41) | LabelValueIgnore = "ignore"
type Reconciler (line 45) | type Reconciler struct
method AddToManager (line 50) | func (r *Reconciler) AddToManager(mgr manager.Manager, controllerRingN...
method Reconcile (line 77) | func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Requ...
function ConfigMapPredicate (line 70) | func ConfigMapPredicate() predicate.Predicate {
FILE: test/integration/shard/lease/lease_suite_test.go
function TestController (line 39) | func TestController(t *testing.T) {
constant testID (line 44) | testID = "shard-lease-test"
FILE: test/integration/sharder/controller/controllerring/controllerring_suite_test.go
function TestControllerRing (line 48) | func TestControllerRing(t *testing.T) {
constant testID (line 53) | testID = "controllerring-controller-test"
FILE: test/integration/sharder/controller/controllerring/controllerring_test.go
function newLease (line 143) | func newLease(controllerRingName string) *coordinationv1.Lease {
function haveStatusShards (line 163) | func haveStatusShards(availableShards, shards int32) gomegatypes.GomegaM...
FILE: test/integration/sharder/controller/sharder/sharder_suite_test.go
function TestSharder (line 49) | func TestSharder(t *testing.T) {
constant testID (line 54) | testID = "sharder-controller-test"
FILE: test/integration/sharder/controller/sharder/sharder_test.go
function newObject (line 215) | func newObject[T client.Object](obj T) T {
function newLease (line 221) | func newLease() *coordinationv1.Lease {
function overwriteKeyFuncs (line 241) | func overwriteKeyFuncs(fn key.Func) func() {
function keyForShard (line 252) | func keyForShard(shardName string) key.Func {
FILE: test/integration/sharder/controller/shardlease/shardlease_suite_test.go
function TestShardLease (line 47) | func TestShardLease(t *testing.T) {
constant testID (line 52) | testID = "shardlease-controller-test"
FILE: test/integration/sharder/controller/shardlease/shardlease_test.go
function haveState (line 141) | func haveState(state string) gomegatypes.GomegaMatcher {
FILE: test/integration/sharder/webhook/sharder/sharder_suite_test.go
function TestSharder (line 52) | func TestSharder(t *testing.T) {
constant testID (line 57) | testID = "sharder-webhook-test"
FILE: test/integration/sharder/webhook/sharder/sharder_test.go
function newLease (line 180) | func newLease() *coordinationv1.Lease {
function beAssigned (line 200) | func beAssigned(shard ...string) gomegatypes.GomegaMatcher {
function createObject (line 208) | func createObject(obj client.Object, opts ...client.CreateOption) func(c...
function patchObject (line 221) | func patchObject(obj client.Object) func(ctx context.Context) (client.Ob...
function drainObject (line 227) | func drainObject(obj client.Object) func(ctx context.Context) (client.Ob...
FILE: webhosting-operator/cmd/experiment/main.go
function init (line 56) | func init() {
function main (line 61) | func main() {
FILE: webhosting-operator/cmd/measure/main.go
constant stdin (line 42) | stdin = "-"
constant stdout (line 43) | stdout = "-"
function main (line 61) | func main() {
type timeValue (line 105) | type timeValue
method Type (line 107) | func (t *timeValue) Type() string { return "time" }
method String (line 108) | func (t *timeValue) String() string { return (*time.Time)(t).UTC().For...
method Set (line 110) | func (t *timeValue) Set(s string) error {
function newClient (line 135) | func newClient() (v1.API, error) {
constant QueryTypeInstant (line 147) | QueryTypeInstant = "instant"
constant QueryTypeRange (line 148) | QueryTypeRange = "range"
type QueriesConfig (line 151) | type QueriesConfig struct
type Query (line 155) | type Query struct
method fetchData (line 248) | func (q Query) fetchData(ctx context.Context, c v1.API) (metricData, e...
method writeResult (line 298) | func (q Query) writeResult(data metricData) error {
method verifySLO (line 354) | func (q Query) verifySLO(data metricData) (checked bool, ok bool) {
function run (line 166) | func run(ctx context.Context, c v1.API) error {
function decodeQueriesConfig (line 222) | func decodeQueriesConfig() (*QueriesConfig, error) {
function prepareOutputDir (line 232) | func prepareOutputDir() error {
function rangeToString (line 385) | func rangeToString(r v1.Range) string {
type metricData (line 391) | type metricData interface
type metricValue (line 402) | type metricValue struct
method IsZero (line 408) | func (m metricValue) IsZero() bool {
type matrixData (line 412) | type matrixData struct
method IsEmpty (line 420) | func (m *matrixData) IsEmpty() bool {
method GetLabelNames (line 424) | func (m *matrixData) GetLabelNames() model.LabelNames {
method Reset (line 439) | func (m *matrixData) Reset() {
method NextValue (line 444) | func (m *matrixData) NextValue() metricValue {
type vectorData (line 473) | type vectorData struct
method IsEmpty (line 479) | func (v *vectorData) IsEmpty() bool {
method GetLabelNames (line 483) | func (v *vectorData) GetLabelNames() model.LabelNames {
method Reset (line 498) | func (v *vectorData) Reset() {
method NextValue (line 502) | func (v *vectorData) NextValue() metricValue {
function toStringSlice (line 524) | func toStringSlice[S ~[]E, E ~string](s S) []string {
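The timeValue methods listed above (Type/String/Set) form a pflag.Value adapter around time.Time. A sketch assuming RFC3339 input, which matches the UTC formatting visible in the String method:

```go
package measure

import (
	"fmt"
	"time"
)

// timeValue lets a time.Time be set directly from a command-line flag.
type timeValue time.Time

func (t *timeValue) Type() string   { return "time" }
func (t *timeValue) String() string { return (*time.Time)(t).UTC().Format(time.RFC3339) }

// Set parses the flag value; the accepted format is an assumption here.
func (t *timeValue) Set(s string) error {
	parsed, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return fmt.Errorf("invalid time %q: %w", s, err)
	}
	*t = timeValue(parsed)
	return nil
}
```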
FILE: webhosting-operator/cmd/samples-generator/main.go
function init (line 51) | func init() {
function main (line 56) | func main() {
function generateSamples (line 82) | func generateSamples(ctx context.Context, c client.Client) error {
FILE: webhosting-operator/cmd/webhosting-operator/main.go
function init (line 66) | func init() {
function main (line 74) | func main() {
type options (line 129) | type options struct
method AddFlags (line 140) | func (o *options) AddFlags(fs *flag.FlagSet) {
method Complete (line 146) | func (o *options) Complete() error {
method applyConfigToRESTConfig (line 189) | func (o *options) applyConfigToRESTConfig() {
method applyConfigToOptions (line 200) | func (o *options) applyConfigToOptions() {
method applyOptionsOverrides (line 233) | func (o *options) applyOptionsOverrides() error {
function dropUnwantedMetadata (line 292) | func dropUnwantedMetadata(i interface{}) (interface{}, error) {
FILE: webhosting-operator/pkg/apis/config/v1alpha1/defaults.go
function addDefaultingFuncs (line 31) | func addDefaultingFuncs(scheme *runtime.Scheme) error {
function SetDefaults_WebhostingOperatorConfig (line 35) | func SetDefaults_WebhostingOperatorConfig(obj *WebhostingOperatorConfig) {
function SetDefaults_LeaderElectionConfiguration (line 52) | func SetDefaults_LeaderElectionConfiguration(obj *componentbaseconfigv1a...
function SetDefaults_DebuggingConfiguration (line 67) | func SetDefaults_DebuggingConfiguration(obj *componentbaseconfigv1alpha1...
function SetDefaults_HealthEndpoint (line 75) | func SetDefaults_HealthEndpoint(obj *HealthEndpoint) {
function SetDefaults_MetricsEndpoint (line 81) | func SetDefaults_MetricsEndpoint(obj *MetricsEndpoint) {
FILE: webhosting-operator/pkg/apis/config/v1alpha1/register.go
constant GroupName (line 28) | GroupName = "config.webhosting.timebertt.dev"
function addKnownTypes (line 41) | func addKnownTypes(scheme *runtime.Scheme) error {
FILE: webhosting-operator/pkg/apis/config/v1alpha1/types.go
type WebhostingOperatorConfig (line 28) | type WebhostingOperatorConfig struct
type HealthEndpoint (line 58) | type HealthEndpoint struct
type MetricsEndpoint (line 68) | type MetricsEndpoint struct
type IngressConfiguration (line 78) | type IngressConfiguration struct
FILE: webhosting-operator/pkg/apis/config/v1alpha1/zz_generated.deepcopy.go
method DeepCopyInto (line 31) | func (in *HealthEndpoint) DeepCopyInto(out *HealthEndpoint) {
method DeepCopy (line 36) | func (in *HealthEndpoint) DeepCopy() *HealthEndpoint {
method DeepCopyInto (line 46) | func (in *IngressConfiguration) DeepCopyInto(out *IngressConfiguration) {
method DeepCopy (line 70) | func (in *IngressConfiguration) DeepCopy() *IngressConfiguration {
method DeepCopyInto (line 80) | func (in *MetricsEndpoint) DeepCopyInto(out *MetricsEndpoint) {
method DeepCopy (line 85) | func (in *MetricsEndpoint) DeepCopy() *MetricsEndpoint {
method DeepCopyInto (line 95) | func (in *WebhostingOperatorConfig) DeepCopyInto(out *WebhostingOperator...
method DeepCopy (line 128) | func (in *WebhostingOperatorConfig) DeepCopy() *WebhostingOperatorConfig {
method DeepCopyObject (line 138) | func (in *WebhostingOperatorConfig) DeepCopyObject() runtime.Object {
FILE: webhosting-operator/pkg/apis/config/v1alpha1/zz_generated.defaults.go
function RegisterDefaults (line 31) | func RegisterDefaults(scheme *runtime.Scheme) error {
function SetObjectDefaults_WebhostingOperatorConfig (line 36) | func SetObjectDefaults_WebhostingOperatorConfig(in *WebhostingOperatorCo...
FILE: webhosting-operator/pkg/apis/webhosting/v1alpha1/constants.go
constant NamespaceSystem (line 25) | NamespaceSystem = "webhosting-system"
constant WebhostingOperatorName (line 27) | WebhostingOperatorName = "webhosting-operator"
constant LabelKeyProject (line 29) | LabelKeyProject = "webhosting.timebertt.dev/project"
constant LabelValueProject (line 31) | LabelValueProject = "true"
constant LabelKeySkipWorkload (line 35) | LabelKeySkipWorkload = "skip-workload"
FILE: webhosting-operator/pkg/apis/webhosting/v1alpha1/register.go
constant GroupName (line 29) | GroupName = "webhosting.timebertt.dev"
function addKnownTypes (line 42) | func addKnownTypes(scheme *runtime.Scheme) error {
FILE: webhosting-operator/pkg/apis/webhosting/v1alpha1/types_theme.go
type ThemeSpec (line 24) | type ThemeSpec struct
type ThemeStatus (line 32) | type ThemeStatus struct
type Theme (line 43) | type Theme struct
type ThemeList (line 59) | type ThemeList struct
FILE: webhosting-operator/pkg/apis/webhosting/v1alpha1/types_website.go
type WebsiteSpec (line 24) | type WebsiteSpec struct
type WebsiteStatus (line 30) | type WebsiteStatus struct
type WebsitePhase (line 43) | type WebsitePhase
constant PhasePending (line 47) | PhasePending WebsitePhase = "Pending"
constant PhaseReady (line 49) | PhaseReady WebsitePhase = "Ready"
constant PhaseError (line 51) | PhaseError WebsitePhase = "Error"
constant PhaseTerminating (line 53) | PhaseTerminating WebsitePhase = "Terminating"
type Website (line 67) | type Website struct
type WebsiteList (line 83) | type WebsiteList struct
FILE: webhosting-operator/pkg/apis/webhosting/v1alpha1/zz_generated.deepcopy.go
method DeepCopyInto (line 28) | func (in *Theme) DeepCopyInto(out *Theme) {
method DeepCopy (line 37) | func (in *Theme) DeepCopy() *Theme {
method DeepCopyObject (line 47) | func (in *Theme) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 55) | func (in *ThemeList) DeepCopyInto(out *ThemeList) {
method DeepCopy (line 69) | func (in *ThemeList) DeepCopy() *ThemeList {
method DeepCopyObject (line 79) | func (in *ThemeList) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 87) | func (in *ThemeSpec) DeepCopyInto(out *ThemeSpec) {
method DeepCopy (line 92) | func (in *ThemeSpec) DeepCopy() *ThemeSpec {
method DeepCopyInto (line 102) | func (in *ThemeStatus) DeepCopyInto(out *ThemeStatus) {
method DeepCopy (line 107) | func (in *ThemeStatus) DeepCopy() *ThemeStatus {
method DeepCopyInto (line 117) | func (in *Website) DeepCopyInto(out *Website) {
method DeepCopy (line 126) | func (in *Website) DeepCopy() *Website {
method DeepCopyObject (line 136) | func (in *Website) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 144) | func (in *WebsiteList) DeepCopyInto(out *WebsiteList) {
method DeepCopy (line 158) | func (in *WebsiteList) DeepCopy() *WebsiteList {
method DeepCopyObject (line 168) | func (in *WebsiteList) DeepCopyObject() runtime.Object {
method DeepCopyInto (line 176) | func (in *WebsiteSpec) DeepCopyInto(out *WebsiteSpec) {
method DeepCopy (line 181) | func (in *WebsiteSpec) DeepCopy() *WebsiteSpec {
method DeepCopyInto (line 191) | func (in *WebsiteStatus) DeepCopyInto(out *WebsiteStatus) {
method DeepCopy (line 200) | func (in *WebsiteStatus) DeepCopy() *WebsiteStatus {
FILE: webhosting-operator/pkg/controllers/webhosting/suite_test.go
function TestController (line 40) | func TestController(t *testing.T) {
FILE: webhosting-operator/pkg/controllers/webhosting/templates/index.go
function init (line 36) | func init() {
function RenderIndexHTML (line 45) | func RenderIndexHTML(serverName string, website *webhostingv1alpha1.Webs...
function ExecuteIndexHTMLTemplate (line 52) | func ExecuteIndexHTMLTemplate(w io.Writer, serverName string, website *w...
FILE: webhosting-operator/pkg/controllers/webhosting/templates/internal/examples.go
function CreateExamples (line 26) | func CreateExamples() (string, *webhostingv1alpha1.Website, *webhostingv...
FILE: webhosting-operator/pkg/controllers/webhosting/templates/nginx.go
function init (line 35) | func init() {
function RenderNginxConf (line 44) | func RenderNginxConf(serverName string, website *webhostingv1alpha1.Webs...
FILE: webhosting-operator/pkg/controllers/webhosting/templates/templates_suite_test.go
function TestTemplates (line 26) | func TestTemplates(t *testing.T) {
FILE: webhosting-operator/pkg/controllers/webhosting/templates/testserver/server.go
function main (line 27) | func main() {
FILE: webhosting-operator/pkg/controllers/webhosting/website_controller.go
type WebsiteReconciler (line 61) | type WebsiteReconciler struct
method Reconcile (line 84) | func (r *WebsiteReconciler) Reconcile(ctx context.Context, website *we...
method reconcileWebsite (line 110) | func (r *WebsiteReconciler) reconcileWebsite(ctx context.Context, log ...
method recordError (line 173) | func (r *WebsiteReconciler) recordError(website *webhostingv1alpha1.We...
method reconcileConfigMap (line 186) | func (r *WebsiteReconciler) reconcileConfigMap(ctx context.Context, lo...
method reconcileService (line 217) | func (r *WebsiteReconciler) reconcileService(ctx context.Context, log ...
method reconcileIngress (line 243) | func (r *WebsiteReconciler) reconcileIngress(ctx context.Context, log ...
method reconcileDeployment (line 329) | func (r *WebsiteReconciler) reconcileDeployment(ctx context.Context, l...
method SetupWithManager (line 460) | func (r *WebsiteReconciler) SetupWithManager(mgr manager.Manager, enab...
method MapThemeToWebsites (line 539) | func (r *WebsiteReconciler) MapThemeToWebsites(ctx context.Context, th...
constant reasonReconcilerError (line 171) | reasonReconcilerError = "ReconcilerError"
constant keyIndexHTML (line 181) | keyIndexHTML = "index.html"
constant keyNginxConf (line 182) | keyNginxConf = "nginx.conf"
constant portNameHTTP (line 183) | portNameHTTP = "http"
function applyIngressConfigToIngress (line 294) | func applyIngressConfigToIngress(config *configv1alpha1.IngressConfigura...
function getLabelsForServer (line 416) | func getLabelsForServer(serverName, name string) map[string]string {
function mergeMaps (line 425) | func mergeMaps[M interface{ ~map[K]V }, K comparable, V any](mm ...M) M {
function calculateServerName (line 433) | func calculateServerName(website *webhostingv1alpha1.Website) string {
function calculateConfigMapChecksum (line 444) | func calculateConfigMapChecksum(configMap *corev1.ConfigMap) (string, er...
constant ControllerName (line 455) | ControllerName = "website"
constant websiteThemeField (line 456) | websiteThemeField = "spec.theme"
function skipWorkload (line 625) | func skipWorkload(website *webhostingv1alpha1.Website) bool {
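The generic signature of mergeMaps above admits an obvious implementation; presumably it combines label sets such as the server labels from getLabelsForServer. A sketch in which later maps win on key conflicts (that precedence is an assumption) and the inputs are left untouched:

```go
package webhosting

// mergeMaps copies all entries of the given maps into a fresh map of the
// same type; entries from later maps overwrite earlier ones.
func mergeMaps[M ~map[K]V, K comparable, V any](mm ...M) M {
	out := make(M)
	for _, m := range mm {
		for k, v := range m {
			out[k] = v
		}
	}
	return out
}
```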
FILE: webhosting-operator/pkg/experiment/generator/options.go
type GenerateOption (line 23) | type GenerateOption interface
type GenerateOptions (line 27) | type GenerateOptions struct
method ApplyOptions (line 32) | func (o *GenerateOptions) ApplyOptions(opts ...GenerateOption) *Genera...
method ApplyToOptions (line 39) | func (o *GenerateOptions) ApplyToOptions(options *GenerateOptions) {
method ApplyToObject (line 55) | func (o *GenerateOptions) ApplyToObject(obj *metav1.ObjectMeta) {
type GenerateOptionFunc (line 65) | type GenerateOptionFunc
method ApplyToOptions (line 67) | func (f GenerateOptionFunc) ApplyToOptions(options *GenerateOptions) {
function WithLabels (line 71) | func WithLabels(labels map[string]string) GenerateOption {
function WithOwnerReference (line 83) | func WithOwnerReference(ownerRef *metav1.OwnerReference) GenerateOption {
FILE: webhosting-operator/pkg/experiment/generator/project.go
function CreateProjects (line 31) | func CreateProjects(ctx context.Context, c client.Client, n int, opts .....
function CreateProject (line 40) | func CreateProject(ctx context.Context, c client.Client, opts ...Generat...
function CleanupProjects (line 62) | func CleanupProjects(ctx context.Context, c client.Client, labels map[st...
FILE: webhosting-operator/pkg/experiment/generator/reconciler.go
constant defaultReconcileWorkers (line 39) | defaultReconcileWorkers = 10
type Every (line 42) | type Every struct
method AddToManager (line 52) | func (r *Every) AddToManager(mgr manager.Manager) error {
method Reconcile (line 87) | func (r *Every) Reconcile(ctx context.Context, _ reconcile.Request) (r...
type ForEach (line 98) | type ForEach struct
method AddToManager (line 111) | func (r *ForEach[T]) AddToManager(mgr manager.Manager) error {
method Reconcile (line 156) | func (r *ForEach[T]) Reconcile(ctx context.Context, request reconcile.Re...
function unlimitedRateLimiter (line 174) | func unlimitedRateLimiter() workqueue.TypedRateLimiter[reconcile.Request] {
FILE: webhosting-operator/pkg/experiment/generator/theme.go
function CreateThemes (line 36) | func CreateThemes(ctx context.Context, c client.Client, n int, opts ...G...
function CreateTheme (line 45) | func CreateTheme(ctx context.Context, c client.Client, opts ...GenerateO...
function MutateTheme (line 68) | func MutateTheme(ctx context.Context, c client.Client, theme *webhosting...
function MutateRandomTheme (line 83) | func MutateRandomTheme(ctx context.Context, c client.Client, labels map[...
FILE: webhosting-operator/pkg/experiment/generator/utils.go
function EmitN (line 48) | func EmitN(n int, delay time.Duration) source.Source {
function StopOnContextCanceled (line 64) | func StopOnContextCanceled(r reconcile.Reconciler) reconcile.Reconciler {
function NTimesConcurrently (line 76) | func NTimesConcurrently(n, workers int, do func() error) error {
function RetryOnError (line 126) | func RetryOnError(ctx context.Context, retries int, do func(context.Cont...
function CreateClusterScopedOwnerObject (line 153) | func CreateClusterScopedOwnerObject(ctx context.Context, c client.Client...
type constantDelayRateLimiter (line 173) | type constantDelayRateLimiter
method When (line 175) | func (d constantDelayRateLimiter) When(reconcile.Request) time.Duratio...
method Forget (line 176) | func (d constantDelayRateLimiter) Forget(reconcile.Request) ...
method NumRequeues (line 177) | func (d constantDelayRateLimiter) NumRequeues(reconcile.Request) int ...
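The one-line method bodies of constantDelayRateLimiter suggest a stateless workqueue rate limiter that always returns a fixed delay. One plausible shape, type-checked against client-go's generic interface (the underlying type is an assumption):

```go
package generator

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// constantDelayRateLimiter requeues every item after a fixed delay,
// with no backoff and no per-item state.
type constantDelayRateLimiter time.Duration

var _ workqueue.TypedRateLimiter[reconcile.Request] = constantDelayRateLimiter(0)

func (d constantDelayRateLimiter) When(reconcile.Request) time.Duration { return time.Duration(d) }
func (d constantDelayRateLimiter) Forget(reconcile.Request)             {}
func (d constantDelayRateLimiter) NumRequeues(reconcile.Request) int    { return 0 }
```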
FILE: webhosting-operator/pkg/experiment/generator/website.go
type WebsiteTracker (line 39) | type WebsiteTracker interface
function SetWebsiteTracker (line 44) | func SetWebsiteTracker(tracker WebsiteTracker) {
function CreateWebsites (line 51) | func CreateWebsites(ctx context.Context, c client.Client, n int, opts .....
function CreateWebsite (line 60) | func CreateWebsite(ctx context.Context, c client.Client, opts ...Generat...
function MutateWebsite (line 105) | func MutateWebsite(ctx context.Context, c client.Client, website *webhos...
function ReconcileWebsite (line 133) | func ReconcileWebsite(ctx context.Context, c client.Client, website *web...
function DeleteWebsite (line 146) | func DeleteWebsite(ctx context.Context, c client.Client, labels map[stri...
FILE: webhosting-operator/pkg/experiment/scenario.go
type Scenario (line 29) | type Scenario interface
function RegisterScenario (line 43) | func RegisterScenario(s Scenario) {
function GetAllScenarios (line 52) | func GetAllScenarios() []Scenario {
function GetScenario (line 66) | func GetScenario(s string) Scenario {
FILE: webhosting-operator/pkg/experiment/scenario/base/base.go
type Scenario (line 44) | type Scenario struct
method Name (line 66) | func (s *Scenario) Name() string {
method Done (line 70) | func (s *Scenario) Done() <-chan struct{} {
method AddToManager (line 74) | func (s *Scenario) AddToManager(mgr manager.Manager) error {
method Start (line 90) | func (s *Scenario) Start(ctx context.Context) (err error) {
method prepare (line 149) | func (s *Scenario) prepare(ctx context.Context) (func(context.Context)...
method injectRunIDLabel (line 215) | func (s *Scenario) injectRunIDLabel(ctx context.Context, namespace, na...
method waitForDeployment (line 226) | func (s *Scenario) waitForDeployment(ctx context.Context, namespace, n...
method waitForShardLeases (line 249) | func (s *Scenario) waitForShardLeases(ctx context.Context) error {
method Wait (line 278) | func (s *Scenario) Wait(ctx context.Context, d time.Duration) error {
type Delegate (line 61) | type Delegate interface
FILE: webhosting-operator/pkg/experiment/scenario/basic/basic.go
constant ScenarioName (line 33) | ScenarioName = "basic"
function init (line 35) | func init() {
type scenario (line 45) | type scenario struct
method Description (line 49) | func (s *scenario) Description() string {
method LongDescription (line 53) | func (s *scenario) LongDescription() string {
method Prepare (line 61) | func (s *scenario) Prepare(ctx context.Context) error {
method Run (line 75) | func (s *scenario) Run(ctx context.Context) error {
FILE: webhosting-operator/pkg/experiment/scenario/chaos/chaos.go
constant ScenarioName (line 36) | ScenarioName = "chaos"
function init (line 38) | func init() {
type scenario (line 48) | type scenario struct
method Description (line 52) | func (s *scenario) Description() string {
method LongDescription (line 56) | func (s *scenario) LongDescription() string {
method Prepare (line 64) | func (s *scenario) Prepare(ctx context.Context) error {
method Run (line 78) | func (s *scenario) Run(ctx context.Context) error {
function terminateRandomShard (line 115) | func terminateRandomShard(ctx context.Context, c client.Client) error {
FILE: webhosting-operator/pkg/experiment/scenario/rolling-update/rolling_update.go
constant ScenarioName (line 37) | ScenarioName = "rolling-update"
function init (line 39) | func init() {
type scenario (line 49) | type scenario struct
method Description (line 53) | func (s *scenario) Description() string {
method LongDescription (line 57) | func (s *scenario) LongDescription() string {
method Prepare (line 66) | func (s *scenario) Prepare(ctx context.Context) error {
method Run (line 80) | func (s *scenario) Run(ctx context.Context) error {
function triggerRollingUpdate (line 129) | func triggerRollingUpdate(ctx context.Context, c client.Client) error {
FILE: webhosting-operator/pkg/experiment/scenario/scale-out/scale_out.go
constant ScenarioName (line 33) | ScenarioName = "scale-out"
function init (line 35) | func init() {
type scenario (line 45) | type scenario struct
method Description (line 49) | func (s *scenario) Description() string {
method LongDescription (line 53) | func (s *scenario) LongDescription() string {
method Prepare (line 60) | func (s *scenario) Prepare(ctx context.Context) error {
method Run (line 74) | func (s *scenario) Run(ctx context.Context) error {
FILE: webhosting-operator/pkg/experiment/tracker/tracker.go
type tracker (line 29) | type tracker struct
method sortedValues (line 36) | func (t *tracker[K, V]) sortedValues() []*V {
method set (line 55) | func (t *tracker[K, V]) set(key K, mutate func(v *V)) {
FILE: webhosting-operator/pkg/experiment/tracker/website.go
type WebsiteTracker (line 75) | type WebsiteTracker struct
method AddToManager (line 115) | func (w *WebsiteTracker) AddToManager(mgr manager.Manager) error {
method websiteHandler (line 141) | func (w *WebsiteTracker) websiteHandler() handler.EventHandler {
method observedNewReadyGeneration (line 160) | func (w *WebsiteTracker) observedNewReadyGeneration() predicate.Predic...
method RecordSpecChange (line 189) | func (w *WebsiteTracker) RecordSpecChange(website *webhostingv1alpha1....
method recordStatusGeneration (line 201) | func (w *WebsiteTracker) recordStatusGeneration(website *webhostingv1a...
method Reconcile (line 243) | func (w *WebsiteTracker) Reconcile(ctx context.Context, req reconcile....
type objectGenerations (line 85) | type objectGenerations struct
method setGenerationCreated (line 213) | func (o *objectGenerations) setGenerationCreated(generation int64, t t...
method setGenerationReady (line 228) | func (o *objectGenerations) setGenerationReady(generation int64, t tim...
method reconcile (line 256) | func (o *objectGenerations) reconcile(log logr.Logger) reconcile.Result {
type generationTimes (line 92) | type generationTimes struct
function newObjectsTracker (line 97) | func newObjectsTracker() *tracker[types.UID, objectGenerations] {
function newGenerationsTracker (line 108) | func newGenerationsTracker() *tracker[int64, generationTimes] {
type generationsList (line 270) | type generationsList
method backfillGenerations (line 276) | func (gg generationsList) backfillGenerations() {
method recordNewReadyGenerations (line 299) | func (gg generationsList) recordNewReadyGenerations(log logr.Logger) b...
FILE: webhosting-operator/pkg/metrics/add.go
constant namespace (line 25) | namespace = "kube"
function AddToManager (line 28) | func AddToManager(mgr manager.Manager) error {
FILE: webhosting-operator/pkg/utils/kubernetes.go
function RunningInCluster (line 27) | func RunningInCluster() bool {
function IsDeploymentReady (line 38) | func IsDeploymentReady(deployment *appsv1.Deployment) bool {
function GetDeploymentCondition (line 60) | func GetDeploymentCondition(conditions []appsv1.DeploymentCondition, con...
FILE: webhosting-operator/pkg/utils/utils.go
function PickRandom (line 24) | func PickRandom[T any](in []T) T {
function RandomName (line 30) | func RandomName(n int) string {
FILE: webhosting-operator/test/e2e/e2e_suite_test.go
function TestMain (line 48) | func TestMain(m *testing.M) {
function TestE2E (line 54) | func TestE2E(t *testing.T) {
constant testID (line 60) | testID = "e2e-webhosting-operator"
constant ShortTimeout (line 62) | ShortTimeout = 10 * time.Second
constant MediumTimeout (line 63) | MediumTimeout = time.Minute
FILE: webhosting-operator/test/e2e/webhosting_operator_test.go
constant objectCount (line 47) | objectCount = 100
function describeScaleController (line 111) | func describeScaleController(text string, replicas int32) {
function newWebsite (line 130) | func newWebsite(name string) *webhostingv1alpha1.Website {
function itDeploymentShouldBeAvailable (line 146) | func itDeploymentShouldBeAvailable(expectedReplicas int32) {
function itControllerRingShouldHaveAvailableShards (line 158) | func itControllerRingShouldHaveAvailableShards(expectedAvailableShards i...
function itShouldRecognizeReadyShardLeases (line 176) | func itShouldRecognizeReadyShardLeases(expectedCount int) {
function itCreateWebsites (line 193) | func itCreateWebsites() {
function itWebsitesShouldBeReady (line 204) | func itWebsitesShouldBeReady() {
function itScaleController (line 216) | func itScaleController(replicas int32) {
function scaleController (line 224) | func scaleController(ctx context.Context, replicas int32) {
function itShouldAssignObjectsToAvailableShards (line 234) | func itShouldAssignObjectsToAvailableShards() {
function eventuallyShouldAssignObjectsToAvailableShards (line 244) | func eventuallyShouldAssignObjectsToAvailableShards(ctx context.Context,...
function listOf (line 264) | func listOf(obj client.Object) client.ObjectList {
Condensed preview — 484 files, each showing path, character count, and a content snippet.
[
{
"path": ".gitattributes",
"chars": 54,
"preview": "docs/assets/*.jpg filter=lfs diff=lfs merge=lfs -text\n"
},
{
"path": ".github/ISSUE_TEMPLATE/bug.md",
"chars": 348,
"preview": "---\nname: Bug Report\nabout: Report a bug encountered while using this project\nlabels: bug\n---\n\n**What happened**:\n\n**Wha"
},
{
"path": ".github/ISSUE_TEMPLATE/enhancement.md",
"chars": 163,
"preview": "---\nname: Enhancement Request\nabout: Suggest an enhancement to this project\nlabels: enhancement\n---\n\n**What would you li"
},
{
"path": ".github/PULL_REQUEST_TEMPLATE.md",
"chars": 121,
"preview": "**What this PR does / why we need it**:\n\n**Which issue(s) this PR fixes**:\nFixes #\n\n**Special notes for your reviewer**:"
},
{
"path": ".github/release.yaml",
"chars": 439,
"preview": "changelog:\n exclude:\n labels:\n - no-release-note\n categories:\n - title: ⚠️ Breaking Changes\n labels:\n - b"
},
{
"path": ".github/renovate.json5",
"chars": 7701,
"preview": "{\n $schema: 'https://docs.renovatebot.com/renovate-schema.json',\n extends: [\n 'config:recommended',\n ':semanticC"
},
{
"path": ".github/workflows/e2e.yaml",
"chars": 500,
"preview": "name: e2e\n\non:\n push:\n branches:\n - main\n tags:\n - v*\n paths-ignore:\n - \"**.md\"\n pull_request:\n\njobs"
},
{
"path": ".github/workflows/images.yaml",
"chars": 1493,
"preview": "name: images\n\non:\n push:\n branches:\n - main\n tags:\n - v*\n pull_request:\n\njobs:\n images:\n runs-on: ubun"
},
{
"path": ".github/workflows/release-notes.yaml",
"chars": 900,
"preview": "name: release-notes\n\non:\n push:\n branches:\n - main\n workflow_dispatch: {}\n\njobs:\n release-notes:\n runs-on: u"
},
{
"path": ".github/workflows/renovate.yaml",
"chars": 1498,
"preview": "name: renovate\n\non:\n push:\n branches:\n - renovate/*\n\njobs:\n post-update:\n runs-on: ubuntu-latest\n\n steps:\n"
},
{
"path": ".github/workflows/verify.yaml",
"chars": 273,
"preview": "name: verify\n\non:\n push:\n branches:\n - main\n tags:\n - v*\n pull_request:\n\njobs:\n verify:\n runs-on: ubun"
},
{
"path": ".gitignore",
"chars": 333,
"preview": "*.secret*\n.envrc\nhack/kind_kubeconfig.yaml\n.gitguardian.yaml\n.ko.yaml\n\n# Binaries for programs and plugins\n*.exe\n*.exe~\n"
},
{
"path": ".golangci.yaml",
"chars": 1927,
"preview": "version: \"2\"\n\nrun:\n concurrency: 4\n\nlinters:\n enable:\n - copyloopvar\n - ginkgolinter\n - gocritic\n - gosec\n - impo"
},
{
"path": ".run/experiment (kind).run.xml",
"chars": 746,
"preview": "<component name=\"ProjectRunConfigurationManager\">\n <configuration default=\"false\" name=\"experiment (kind)\" type=\"GoAppl"
},
{
"path": ".run/shard (kind).run.xml",
"chars": 795,
"preview": "<component name=\"ProjectRunConfigurationManager\">\n <configuration default=\"false\" name=\"checksum-controller (kind)\" typ"
},
{
"path": ".run/sharder (kind).run.xml",
"chars": 801,
"preview": "<component name=\"ProjectRunConfigurationManager\">\n <configuration default=\"false\" name=\"sharder (kind)\" type=\"GoApplica"
},
{
"path": ".run/webhosting-operator (kind).run.xml",
"chars": 767,
"preview": "<component name=\"ProjectRunConfigurationManager\">\n <configuration default=\"false\" name=\"webhosting-operator (kind)\" typ"
},
{
"path": "LICENSE",
"chars": 11357,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "Makefile",
"chars": 8053,
"preview": "PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))\n\n# Image URL to use all building/pushing image t"
},
{
"path": "README.md",
"chars": 6189,
"preview": "# Kubernetes Controller Sharding\n\n_Horizontally Scalable Kubernetes Controllers_ 🚀\n\n## TL;DR 📖\n\nMake Kubernetes controll"
},
{
"path": "cmd/checksum-controller/main.go",
"chars": 6623,
"preview": "/*\nCopyright 2023 Tim Ebert.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file "
},
{
"path": "cmd/checksum-controller/reconciler.go",
"chars": 4758,
"preview": "/*\nCopyright 2023 Tim Ebert.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file "
},
{
"path": "cmd/sharder/app/app.go",
"chars": 3343,
"preview": "/*\nCopyright 2023 Tim Ebert.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file "
},
{
"path": "cmd/sharder/app/options.go",
"chars": 6564,
"preview": "/*\nCopyright 2023 Tim Ebert.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file "
},
{
"path": "cmd/sharder/main.go",
"chars": 862,
"preview": "/*\nCopyright 2023 Tim Ebert.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file "
},
{
"path": "config/README.md",
"chars": 326,
"preview": "# config\n\nThis directory hosts manifests for deploying the sharding components.\nManifests of components for the developm"
},
{
"path": "config/certificate/certificate.yaml",
"chars": 308,
"preview": "apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n name: webhook-server\nspec:\n issuerRef:\n name: selfsigne"
},
{
"path": "config/certificate/issuer.yaml",
"chars": 96,
"preview": "apiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n name: selfsigned\nspec:\n selfSigned: {}\n"
},
{
"path": "config/certificate/kustomization.yaml",
"chars": 663,
"preview": "apiVersion: kustomize.config.k8s.io/v1alpha1\nkind: Component\n\nnamespace: sharding-system\n\nlabels:\n- includeSelectors: tr"
},
{
"path": "config/crds/kustomization.yaml",
"chars": 230,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nlabels:\n- includeSelectors: true\n pairs:\n app.kuber"
},
{
"path": "config/crds/namespace.yaml",
"chars": 65,
"preview": "apiVersion: v1\nkind: Namespace\nmetadata:\n name: sharding-system\n"
},
{
"path": "config/crds/sharding.timebertt.dev_controllerrings.yaml",
"chars": 11655,
"preview": "---\napiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubeb"
},
{
"path": "config/default/kustomization.yaml",
"chars": 129,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- ../crds\n- ../sharder\n\ncomponents:\n- ../cer"
},
{
"path": "config/monitoring/kustomization.yaml",
"chars": 289,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: sharding-system\n\nresources:\n- servicemonitor"
},
{
"path": "config/monitoring/prometheus_rbac.yaml",
"chars": 772,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app.kubernetes.io/component: prometheus\n"
},
{
"path": "config/monitoring/servicemonitor.yaml",
"chars": 735,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: sharder\n labels:\n app.kubernetes.io/name"
},
{
"path": "config/rbac/kustomization.yaml",
"chars": 437,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- serviceaccount.yaml\n- leader_election.yaml"
},
{
"path": "config/rbac/leader_election.yaml",
"chars": 581,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: sharding:sharder:leader-election\nrules:\n- apiG"
},
{
"path": "config/rbac/metrics_auth.yaml",
"chars": 580,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: sharding:metrics-auth\nrules:\n- apiGroup"
},
{
"path": "config/rbac/pprof_reader.yaml",
"chars": 393,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: sharding:sharder:pprof-reader\nrules:\n- "
},
{
"path": "config/rbac/role.yaml",
"chars": 725,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: sharder\nrules:\n- apiGroups:\n - \"\"\n re"
},
{
"path": "config/rbac/rolebinding.yaml",
"chars": 275,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: sharding:sharder\nroleRef:\n apiG"
},
{
"path": "config/rbac/serviceaccount.yaml",
"chars": 98,
"preview": "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: sharder\nautomountServiceAccountToken: false\n"
},
{
"path": "config/sharder/config.yaml",
"chars": 397,
"preview": "apiVersion: config.sharding.timebertt.dev/v1alpha1\nkind: SharderConfig\nwebhook:\n config:\n annotations:\n # Techn"
},
{
"path": "config/sharder/deployment.yaml",
"chars": 2007,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: sharder\n namespace: sharding-system\n labels:\n app.kubernetes"
},
{
"path": "config/sharder/kustomization.yaml",
"chars": 577,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: sharding-system\n\ngeneratorOptions:\n disable"
},
{
"path": "config/sharder/poddisruptionbudget.yaml",
"chars": 223,
"preview": "apiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app.kubernetes.io/component: sharder\n name: sha"
},
{
"path": "config/sharder/service.yaml",
"chars": 370,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n name: sharder\n namespace: sharding-system\n labels:\n app.kubernetes.io/comp"
},
{
"path": "docs/README.md",
"chars": 429,
"preview": "# Documentation Index\n\n- [Getting Started With Controller Sharding](getting-started.md) ⬅️ start here, if you're new to "
},
{
"path": "docs/design.md",
"chars": 10948,
"preview": "# Design\n\nThis document explains the sharding design in more detail.\nPlease also consider reading the respective design "
},
{
"path": "docs/development.md",
"chars": 7005,
"preview": "# Development and Testing Setup\n\nThis document explains more details of the development and testing setup that is also p"
},
{
"path": "docs/evaluation.md",
"chars": 12663,
"preview": "# Evaluating the Sharding Mechanism\n\nThis guide describes how the sharding mechanism implemented in this repository is e"
},
{
"path": "docs/getting-started.md",
"chars": 14076,
"preview": "# Getting Started With Controller Sharding\n\nThis guide walks you through getting started with controller sharding in a l"
},
{
"path": "docs/implement-sharding.md",
"chars": 12571,
"preview": "# Implement Sharding in Your Controller\n\nThis guide walks you through implementing sharding for your own controller.\nPre"
},
{
"path": "docs/installation.md",
"chars": 2034,
"preview": "# Install the Sharding Components\n\nThis guide walks you through installing the sharding components from this repository "
},
{
"path": "docs/monitoring.md",
"chars": 3568,
"preview": "# Monitoring the Sharding Components\n\nThis document explains the metrics exposed by the sharder for monitoring the shard"
},
{
"path": "go.mod",
"chars": 4954,
"preview": "module github.com/timebertt/kubernetes-controller-sharding\n\ngo 1.24.0\n\ntoolchain go1.25.7\n\nrequire (\n\tgithub.com/cespare"
},
{
"path": "go.sum",
"chars": 27195,
"preview": "cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=\ncel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX"
},
{
"path": "go.work",
"chars": 65,
"preview": "go 1.24.0\n\ntoolchain go1.25.0\n\nuse (\n\t.\n\t./webhosting-operator\n)\n"
},
{
"path": "go.work.sum",
"chars": 14657,
"preview": "cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=\ncloud.google.com/go/c"
},
{
"path": "hack/boilerplate.go.txt",
"chars": 557,
"preview": "/*\nCopyright 2023 Tim Ebert.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file "
},
{
"path": "hack/ci-common.sh",
"chars": 561,
"preview": "export_artifacts() {\n [ -n \"${ARTIFACTS:-}\" ] || return 0\n\n mkdir -p \"$ARTIFACTS\"\n cluster_name=sharding\n echo \"> Ex"
},
{
"path": "hack/ci-e2e-kind.sh",
"chars": 387,
"preview": "#!/usr/bin/env bash\n\nset -o nounset\nset -o pipefail\nset -o errexit\n\nsource \"$(dirname \"$0\")/ci-common.sh\"\n\n# test setup\n"
},
{
"path": "hack/config/README.md",
"chars": 281,
"preview": "# dev\n\nThis directory hosts manifests of components for the development setup.\nManifests of the sharding components are "
},
{
"path": "hack/config/cert-manager/kustomization.yaml",
"chars": 338,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- https://github.com/cert-manager/cert-manag"
},
{
"path": "hack/config/cert-manager/patch-mutatingwebhook.yaml",
"chars": 181,
"preview": "apiVersion: admissionregistration.k8s.io/v1\nkind: MutatingWebhookConfiguration\nmetadata:\n name: cert-manager-webhook\nwe"
},
{
"path": "hack/config/cert-manager/patch-validatingwebhook.yaml",
"chars": 183,
"preview": "apiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n name: cert-manager-webhook\n"
},
{
"path": "hack/config/cert-manager/resources/cluster-issuer.yaml",
"chars": 327,
"preview": "apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-http01\nspec:\n acme:\n email: null@ti"
},
{
"path": "hack/config/cert-manager/resources/kustomization.yaml",
"chars": 98,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- cluster-issuer.yaml\n"
},
{
"path": "hack/config/certificates/host/config.json",
"chars": 255,
"preview": "{\n \"signing\": {\n \"default\": {\n \"expiry\": \"43800h\"\n },\n \"profiles\": {\n \"server\": {\n \"usages\": "
},
{
"path": "hack/config/certificates/host/generate.sh",
"chars": 435,
"preview": "#!/usr/bin/env bash\n\nif ! command -v cfssl &>/dev/null ; then\n echo \"cfssl not found, install it from https://github.co"
},
{
"path": "hack/config/certificates/host/kustomization.yaml",
"chars": 294,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: sharding-system\n\ngeneratorOptions:\n disable"
},
{
"path": "hack/config/certificates/host/webhook-ca-key.pem",
"chars": 3243,
"preview": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKQIBAAKCAgEAsYRPd2BqRLB8TX/5bWZgZzRHihzB49P3t88lkfzbkinOFSWr\nIkQNnC1LOmCim39S0Un6ucE"
},
{
"path": "hack/config/certificates/host/webhook-ca.json",
"chars": 83,
"preview": "{\n \"CN\": \"sharding:sharder\",\n \"key\": {\n \"algo\": \"rsa\",\n \"size\": 4096\n }\n}\n"
},
{
"path": "hack/config/certificates/host/webhook-ca.pem",
"chars": 1801,
"preview": "-----BEGIN CERTIFICATE-----\nMIIFBjCCAu6gAwIBAgIUel12x8tnTJwEPcuKhb8gxDzNL8UwDQYJKoZIhvcNAQEN\nBQAwGzEZMBcGA1UEAxMQc2hhcmR"
},
{
"path": "hack/config/certificates/host/webhook-server-key.pem",
"chars": 3243,
"preview": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKAIBAAKCAgEAnDCsNJ5AoV5lrrsOAtzvDn7tZsqqM0e68p6UGPijBcmPS1Qj\nVRsbHyjWnd/L3bAJSOu7pwu"
},
{
"path": "hack/config/certificates/host/webhook-server.json",
"chars": 268,
"preview": "{\n \"CN\": \"sharding:sharder:webhook\",\n \"key\": {\n \"algo\": \"rsa\",\n \"size\": 4096\n },\n \"hosts\": [\n \"localhost\",\n"
},
{
"path": "hack/config/certificates/host/webhook-server.pem",
"chars": 2082,
"preview": "-----BEGIN CERTIFICATE-----\nMIIF1TCCA72gAwIBAgIURVQwgiERbtUeNjwxwSrp4jIAY+UwDQYJKoZIhvcNAQEN\nBQAwGzEZMBcGA1UEAxMQc2hhcmR"
},
{
"path": "hack/config/checksum-controller/controller/deployment.yaml",
"chars": 643,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: checksum-controller\nspec:\n replicas: 3\n template:\n spec:\n "
},
{
"path": "hack/config/checksum-controller/controller/kustomization.yaml",
"chars": 446,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: default\n\nlabels:\n- includeSelectors: true\n "
},
{
"path": "hack/config/checksum-controller/controller/rbac.yaml",
"chars": 738,
"preview": "---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: sharding:checksum-controller\nrules:\n- apiGroup"
},
{
"path": "hack/config/checksum-controller/controller/serviceaccount.yaml",
"chars": 110,
"preview": "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: checksum-controller\nautomountServiceAccountToken: false\n"
},
{
"path": "hack/config/checksum-controller/controllerring/controllerring.yaml",
"chars": 307,
"preview": "apiVersion: sharding.timebertt.dev/v1alpha1\nkind: ControllerRing\nmetadata:\n name: checksum-controller\nspec:\n resources"
},
{
"path": "hack/config/checksum-controller/controllerring/kustomization.yaml",
"chars": 118,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- controllerring.yaml\n- sharder_rbac.yaml\n"
},
{
"path": "hack/config/checksum-controller/controllerring/sharder_rbac.yaml",
"chars": 808,
"preview": "# These manifests grant the sharder controller permissions to act on resources that we listed in the ControllerRing.\n# W"
},
{
"path": "hack/config/external-dns/kustomization.yaml",
"chars": 751,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: external-dns\n\nimages:\n- name: registry.k8s.i"
},
{
"path": "hack/config/external-dns/namespace.yaml",
"chars": 62,
"preview": "apiVersion: v1\nkind: Namespace\nmetadata:\n name: external-dns\n"
},
{
"path": "hack/config/external-dns/patch-deployment.yaml",
"chars": 1309,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: external-dns\n namespace: default\nspec:\n template:\n spec:\n "
},
{
"path": "hack/config/ingress-nginx/default/kustomization.yaml",
"chars": 1106,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: ingress-nginx\n\nresources:\n- https://raw.gith"
},
{
"path": "hack/config/ingress-nginx/default/patch_controller_resources.yaml",
"chars": 264,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: ingress-nginx-controller\n namespace: ingress-nginx\nspec:\n templ"
},
{
"path": "hack/config/ingress-nginx/default/patch_default_ingress_class.yaml",
"chars": 174,
"preview": "apiVersion: networking.k8s.io/v1\nkind: IngressClass\nmetadata:\n name: nginx\n namespace: ingress-nginx\n annotations:\n "
},
{
"path": "hack/config/ingress-nginx/kind/kustomization.yaml",
"chars": 135,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- ../default\n\npatches:\n- path: patch_service"
},
{
"path": "hack/config/ingress-nginx/kind/patch_service_nodeport.yaml",
"chars": 223,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n name: ingress-nginx-controller\n namespace: ingress-nginx\nspec:\n ports:\n - ap"
},
{
"path": "hack/config/ingress-nginx/shoot/certificate.yaml",
"chars": 304,
"preview": "apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n name: webhosting-tls\nspec:\n dnsNames:\n - webhosting.timeb"
},
{
"path": "hack/config/ingress-nginx/shoot/kustomization.yaml",
"chars": 445,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nnamespace: ingress-nginx\n\nresources:\n- ../default\n- cer"
},
{
"path": "hack/config/ingress-nginx/shoot/patch_service.yaml",
"chars": 186,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n name: ingress-nginx-controller\n namespace: ingress-nginx\n annotations:\n ex"
},
{
"path": "hack/config/kind-config.yaml",
"chars": 460,
"preview": "apiVersion: kind.x-k8s.io/v1alpha4\nkind: Cluster\nnodes:\n- role: control-plane\n extraPortMappings:\n # ingress-nginx\n -"
},
{
"path": "hack/config/kyverno/kustomization.yaml",
"chars": 800,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n- https://github.com/kyverno/kyverno/release"
},
{
"path": "hack/config/monitoring/crds/0alertmanagerConfigCustomResourceDefinition.yaml",
"chars": 693041,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0alertmanagerCustomResourceDefinition.yaml",
"chars": 573557,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0podmonitorCustomResourceDefinition.yaml",
"chars": 63358,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0probeCustomResourceDefinition.yaml",
"chars": 60926,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0prometheusCustomResourceDefinition.yaml",
"chars": 765393,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0prometheusagentCustomResourceDefinition.yaml",
"chars": 646626,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0prometheusruleCustomResourceDefinition.yaml",
"chars": 7640,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0scrapeconfigCustomResourceDefinition.yaml",
"chars": 657740,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0servicemonitorCustomResourceDefinition.yaml",
"chars": 69143,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/0thanosrulerCustomResourceDefinition.yaml",
"chars": 555348,
"preview": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n annotations:\n controller-gen.kubebuild"
},
{
"path": "hack/config/monitoring/crds/README.md",
"chars": 221,
"preview": "The CRDs in this directory were downloaded from\nhttps://github.com/prometheus-operator/kube-prometheus/tree/v0.16.0/mani"
},
{
"path": "hack/config/monitoring/crds/kustomization.yaml",
"chars": 747,
"preview": "# Code generated by update.sh, DO NOT EDIT.\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nlabels:\n- i"
},
{
"path": "hack/config/monitoring/default/dashboards/client-go.json",
"chars": 30758,
"preview": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n "
},
{
"path": "hack/config/monitoring/default/dashboards/controller-details.json",
"chars": 41973,
"preview": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n "
},
{
"path": "hack/config/monitoring/default/dashboards/controller-runtime.json",
"chars": 45865,
"preview": "{\n \"annotations\": {\n \"list\": [\n {\n \"builtIn\": 1,\n \"datasource\": {\n \"type\": \"grafana\",\n "
},
{
"path": "hack/config/monitoring/default/ensure-admin-password.sh",
"chars": 176,
"preview": "#!/usr/bin/env bash\n\ndir=\"$(dirname \"$0\")\"\nfile=\"$dir/grafana_admin_password.secret.txt\"\n\n[ -f \"$file\" ] && exit 0\ncat /"
},
{
"path": "hack/config/monitoring/default/grafana_ingress.yaml",
"chars": 566,
"preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n annotations:\n cert-manager.io/cluster-issuer: letsencrypt-"
},
{
"path": "hack/config/monitoring/default/kustomization.yaml",
"chars": 1400,
"preview": "apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\ncomponents:\n- ../grafana-sidecar\n\nresources:\n- namespac"
},
{
"path": "hack/config/monitoring/default/namespace.yaml",
"chars": 60,
"preview": "apiVersion: v1\nkind: Namespace\nmetadata:\n name: monitoring\n"
},
{
"path": "hack/config/monitoring/default/patch_grafana_admin.yaml",
"chars": 716,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: grafana\n namespace: monitoring\nspec:\n template:\n spec:\n "
},
{
"path": "hack/config/monitoring/default/patch_grafana_networkpolicy.yaml",
"chars": 333,
"preview": "- op: add\n path: /spec/ingress/-\n value:\n from:\n - podSelector:\n matchLabels:\n app.kubernetes.io"
},
{
"path": "hack/config/monitoring/default/patch_kubelet_metrics.yaml",
"chars": 902,
"preview": "# drop storage operation duration metrics (high cardinality)\n- op: add\n path: /spec/endpoints/0/metricRelabelings/-\n v"
},
{
"path": "hack/config/monitoring/default/patch_kubestatemetrics.yaml",
"chars": 736,
"preview": "# drop kube-state-metrics metrics for project namespaces\n- op: add\n path: /spec/template/spec/containers/0/args/-\n val"
},
{
"path": "hack/config/monitoring/default/patch_kubestatemetrics_servicemonitor.yaml",
"chars": 330,
"preview": "# label map for label.prometheus.io/* labels\n- op: add\n path: /spec/endpoints/0/metricRelabelings/-\n value:\n action"
},
{
"path": "hack/config/monitoring/default/patch_prometheus.yaml",
"chars": 338,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: Prometheus\nmetadata:\n name: k8s\n namespace: monitoring\nspec:\n replicas: 1 "
},
{
"path": "hack/config/monitoring/default/rbac-proxy_clusterrole.yaml",
"chars": 280,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: rbac-proxy\nrules:\n- apiGroups:\n - authenti"
},
{
"path": "hack/config/monitoring/grafana-sidecar/dashboards-sidecar.yaml",
"chars": 157,
"preview": "apiVersion: 1\nproviders:\n- folder: Default\n folderUid: \"\"\n name: \"1\"\n options:\n path: /grafana-dashboard-definitio"
},
{
"path": "hack/config/monitoring/grafana-sidecar/kustomization.yaml",
"chars": 355,
"preview": "apiVersion: kustomize.config.k8s.io/v1alpha1\nkind: Component\n\ngeneratorOptions:\n disableNameSuffixHash: true\n\nconfigMap"
},
{
"path": "hack/config/monitoring/grafana-sidecar/patch_grafana_sidecar.yaml",
"chars": 880,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: grafana\n namespace: monitoring\nspec:\n template:\n spec:\n "
},
{
"path": "hack/config/monitoring/grafana-sidecar/sidecar_clusterrole.yaml",
"chars": 230,
"preview": "kind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n labels:\n app.kubernetes.io/name: grafana\n nam"
},
{
"path": "hack/config/monitoring/grafana-sidecar/sidecar_clusterrolebinding.yaml",
"chars": 310,
"preview": "kind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n labels:\n app.kubernetes.io/name: grafan"
},
{
"path": "hack/config/monitoring/kube-prometheus/README.md",
"chars": 220,
"preview": "The manifests in this directory were downloaded from\nhttps://github.com/prometheus-operator/kube-prometheus/tree/v0.16.0"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-clusterRole.yaml",
"chars": 470,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: exporter"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-clusterRoleBinding.yaml",
"chars": 461,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: e"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-configuration.yaml",
"chars": 1392,
"preview": "apiVersion: v1\ndata:\n config.yml: |-\n \"modules\":\n \"http_2xx\":\n \"http\":\n \"preferred_ip_protocol\""
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-deployment.yaml",
"chars": 3645,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n app.kubernetes.io"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-networkPolicy.yaml",
"chars": 722,
"preview": "apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n a"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-service.yaml",
"chars": 540,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n app.kubernetes.io/name: b"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-serviceAccount.yaml",
"chars": 315,
"preview": "apiVersion: v1\nautomountServiceAccountToken: false\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/compon"
},
{
"path": "hack/config/monitoring/kube-prometheus/blackboxExporter-serviceMonitor.yaml",
"chars": 680,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-config.yaml",
"chars": 345,
"preview": "apiVersion: v1\nkind: Secret\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n app.kubernetes.io/name: gra"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-dashboardDatasources.yaml",
"chars": 681,
"preview": "apiVersion: v1\nkind: Secret\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n app.kubernetes.io/name: gra"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-dashboardDefinitions.yaml",
"chars": 1020919,
"preview": "apiVersion: v1\nitems:\n- apiVersion: v1\n data:\n alertmanager-overview.json: |-\n {\n \"graphTooltip\": 1,\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-dashboardSources.yaml",
"chars": 659,
"preview": "apiVersion: v1\ndata:\n dashboards.yaml: |-\n {\n \"apiVersion\": 1,\n \"providers\": [\n {\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-deployment.yaml",
"chars": 11491,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n app.kubernetes.io/"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-networkPolicy.yaml",
"chars": 652,
"preview": "apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n ap"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-prometheusRule.yaml",
"chars": 1423,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-service.yaml",
"chars": 453,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n app.kubernetes.io/name: gr"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-serviceAccount.yaml",
"chars": 294,
"preview": "apiVersion: v1\nautomountServiceAccountToken: false\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/compon"
},
{
"path": "hack/config/monitoring/kube-prometheus/grafana-serviceMonitor.yaml",
"chars": 399,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: grafana\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/kubePrometheus-prometheusRule.yaml",
"chars": 4301,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-clusterRole.yaml",
"chars": 1998,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: exporter"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-clusterRoleBinding.yaml",
"chars": 465,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: e"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-deployment.yaml",
"chars": 3617,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n app.kubernetes.io"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-networkPolicy.yaml",
"chars": 724,
"preview": "apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n a"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-prometheusRule.yaml",
"chars": 3183,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-service.yaml",
"chars": 581,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n app.kubernetes.io/name: k"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-serviceAccount.yaml",
"chars": 317,
"preview": "apiVersion: v1\nautomountServiceAccountToken: false\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/compon"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubeStateMetrics-serviceMonitor.yaml",
"chars": 1164,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubernetesControlPlane-prometheusRule.yaml",
"chars": 83623,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n labels:\n app.kubernetes.io/component: kubernete"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorApiserver.yaml",
"chars": 7808,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: kubernete"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorCoreDNS.yaml",
"chars": 635,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: kubernete"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml",
"chars": 6989,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: kubernete"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml",
"chars": 942,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: kubernete"
},
{
"path": "hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorKubelet.yaml",
"chars": 8391,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: kubernete"
},
{
"path": "hack/config/monitoring/kube-prometheus/kustomization.yaml",
"chars": 2932,
"preview": "# Code generated by update.sh, DO NOT EDIT.\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\n\nresources:\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-clusterRole.yaml",
"chars": 461,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: exporter"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-clusterRoleBinding.yaml",
"chars": 444,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: e"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-daemonset.yaml",
"chars": 3772,
"preview": "apiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n app.kubernetes.io/"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-networkPolicy.yaml",
"chars": 671,
"preview": "apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n a"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-prometheusRule.yaml",
"chars": 20329,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-service.yaml",
"chars": 492,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n app.kubernetes.io/name: n"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-serviceAccount.yaml",
"chars": 306,
"preview": "apiVersion: v1\nautomountServiceAccountToken: false\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/compon"
},
{
"path": "hack/config/monitoring/kube-prometheus/nodeExporter-serviceMonitor.yaml",
"chars": 850,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: exporter\n"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-clusterRole.yaml",
"chars": 447,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: promethe"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-clusterRoleBinding.yaml",
"chars": 482,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: p"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-networkPolicy.yaml",
"chars": 1072,
"preview": "apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n labels:\n app.kubernetes.io/component: prometheus\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-prometheus.yaml",
"chars": 1297,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: Prometheus\nmetadata:\n labels:\n app.kubernetes.io/component: prometheus\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-prometheusRule.yaml",
"chars": 17268,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n labels:\n app.kubernetes.io/component: prometheu"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-roleBindingConfig.yaml",
"chars": 506,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: promethe"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-roleBindingSpecificNamespaces.yaml",
"chars": 1658,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nitems:\n- apiVersion: rbac.authorization.k8s.io/v1\n kind: RoleBinding\n metadat"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-roleConfig.yaml",
"chars": 401,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app.kubernetes.io/component: prometheus\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-roleSpecificNamespaces.yaml",
"chars": 2158,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nitems:\n- apiVersion: rbac.authorization.k8s.io/v1\n kind: Role\n metadata:\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-service.yaml",
"chars": 636,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/component: prometheus\n app.kubernetes.io/insta"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-serviceAccount.yaml",
"chars": 341,
"preview": "apiVersion: v1\nautomountServiceAccountToken: true\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/compone"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheus-serviceMonitor.yaml",
"chars": 623,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: prometheu"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRole.yaml",
"chars": 410,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml",
"chars": 577,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleBinding.yaml",
"chars": 472,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: m"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleBindingDelegator.yaml",
"chars": 495,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: m"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleServerResources.yaml",
"chars": 379,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-configMap.yaml",
"chars": 2205,
"preview": "apiVersion: v1\ndata:\n config.yaml: |-\n \"resourceRules\":\n \"cpu\":\n \"containerLabel\": \"container\"\n \""
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-deployment.yaml",
"chars": 3426,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-adapter\n app.kubern"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-networkPolicy.yaml",
"chars": 565,
"preview": "apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-adapte"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-podDisruptionBudget.yaml",
"chars": 502,
"preview": "apiVersion: policy/v1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-adapter\n "
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-roleBindingAuthReader.yaml",
"chars": 516,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-service.yaml",
"chars": 502,
"preview": "apiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-adapter\n app.kubernetes.io/"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-serviceAccount.yaml",
"chars": 324,
"preview": "apiVersion: v1\nautomountServiceAccountToken: false\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/compon"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusAdapter-serviceMonitor.yaml",
"chars": 907,
"preview": "apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app.kubernetes.io/component: metrics-a"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusOperator-clusterRole.yaml",
"chars": 1814,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/component: controll"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusOperator-clusterRoleBinding.yaml",
"chars": 471,
"preview": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app.kubernetes.io/component: c"
},
{
"path": "hack/config/monitoring/kube-prometheus/prometheusOperator-deployment.yaml",
"chars": 2878,
"preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/component: controller\n app.kubernetes."
}
]
// ... and 284 more files