main 2b2129b870d1 cached
387 files
16.2 MB
179.2k tokens
1 requests
Download .txt
Showing preview only (669K chars total). Download the full file or copy to clipboard to get everything.
Repository: stacksimplify/google-kubernetes-engine
Branch: main
Commit: 2b2129b870d1
Files: 387
Total size: 16.2 MB

Directory structure:
gitextract_6wj2qqr2/

├── .gitignore
├── 01-Create-GCP-Account/
│   └── README.md
├── 02-Create-GKE-Cluster/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 03-gcloud-cli-install-macos/
│   └── README.md
├── 04-gcloud-cli-install-windowsos/
│   └── README.md
├── 05-Docker-For-Beginners/
│   └── README.md
├── 06-kubectl-imperative-k8s-pods/
│   └── README.md
├── 07-kubectl-declarative-k8s-ReplicaSets/
│   ├── README.md
│   └── replicaset-demo.yml
├── 08-kubectl-imperative-k8s-deployment-CREATE/
│   └── README.md
├── 09-kubectl-imperative-k8s-deployment-UPDATE/
│   └── README.md
├── 10-kubectl-imperative-k8s-deployment-ROLLBACK/
│   └── README.md
├── 11-kubectl-imperative-k8s-deployment-PAUSE-RESUME/
│   └── README.md
├── 12-kubectl-imperative-k8s-services/
│   └── README.md
├── 13-YAML-Basics/
│   ├── README.md
│   ├── sample-file.yml
│   └── yaml-demo.yaml
├── 14-yaml-declarative-k8s-pods/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-pod-definition.yml
│       └── 02-pod-LoadBalancer-service.yml
├── 15-yaml-declarative-k8s-replicasets/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-replicaset-definition.yml
│       └── 02-replicaset-LoadBalancer-servie.yml
├── 16-yaml-declarative-k8s-deployments/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-deployment-definition.yml
│       └── 02-deployment-LoadBalancer-servie.yml
├── 17-yaml-declarative-k8s-services/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-backend-deployment.yml
│       ├── 02-backend-clusterip-service.yml
│       ├── 03-frontend-deployment.yml
│       └── 04-frontend-LoadBalancer-service.yml
├── 18-GKE-NodePort-Service/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-nodeport-service.yaml
├── 19-GKE-Headless-Service/
│   ├── 01-kube-manifests/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-clusterip-service.yaml
│   │   └── 03-kubernetes-headless-service.yaml
│   ├── 02-kube-manifests-curl/
│   │   └── 01-curl-pod.yml
│   └── README.md
├── 20-GKE-Private-Cluster/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 21-GKE-PD-existing-SC-standard-rwo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 22-GKE-PD-existing-SC-premium-rwo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 23-GKE-PD-Custom-StorageClass/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-storage-class.yaml
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 24-GKE-PD-preexisting-PD/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-persistent-volume.yaml
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 25-GKE-PD-Regional-PD/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-storage-class.yaml
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 26-GKE-PD-Volume-Snapshots-and-Restore/
│   ├── 01-kube-manifests/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   ├── 02-Volume-Snapshot/
│   │   ├── 01-VolumeSnapshotClass.yaml
│   │   └── 02-VolumeSnapshot.yaml
│   ├── 03-Volume-Restore/
│   │   ├── 01-restore-pvc.yaml
│   │   └── 02-mysql-deployment.yaml
│   └── README.md
├── 27-GKE-PD-Volume-Clone/
│   ├── 01-kube-manifests/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   ├── 02-Use-Cloned-Volume-kube-manifests/
│   │   ├── 01-podpvc-clone.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   ├── 03-With-NodeSelectors/
│   │   ├── 01-kube-manifests/
│   │   │   ├── 01-persistent-volume-claim.yaml
│   │   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   │   ├── 03-mysql-deployment.yaml
│   │   │   ├── 04-mysql-clusterip-service.yaml
│   │   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 02-Use-Cloned-Volume-kube-manifests/
│   │       ├── 01-podpvc-clone.yaml
│   │       ├── 02-UserManagement-ConfigMap.yaml
│   │       ├── 03-mysql-deployment.yaml
│   │       ├── 04-mysql-clusterip-service.yaml
│   │       ├── 05-UserMgmtWebApp-Deployment.yaml
│   │       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   └── README.md
├── 28-GKE-Storage-with-GCP-CloudSQL-Public/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-MySQL-externalName-Service.yaml
│       ├── 02-Kubernetes-Secrets.yaml
│       ├── 03-UserMgmtWebApp-Deployment.yaml
│       └── 04-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 29-GKE-Storage-with-GCP-CloudSQL-Private/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-MySQL-externalName-Service.yaml
│       ├── 02-Kubernetes-Secrets.yaml
│       ├── 03-UserMgmtWebApp-Deployment.yaml
│       └── 04-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 30-GCP-CloudSQL-Private-NO-ExternalNameService/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Kubernetes-Secrets.yaml
│       ├── 02-UserMgmtWebApp-Deployment.yaml
│       └── 03-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 31-GKE-FileStore-default-StorageClass/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-filestore-pvc.yaml
│       ├── 02-write-to-filestore-pod.yaml
│       ├── 03-myapp1-deployment.yaml
│       └── 04-loadBalancer-service.yaml
├── 32-GKE-FileStore-custom-StorageClass/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-filestore-storage-class.yaml
│       ├── 01-filestore-pvc.yaml
│       ├── 02-write-to-filestore-pod.yaml
│       ├── 03-myapp1-deployment.yaml
│       └── 04-loadBalancer-service.yaml
├── 33-GKE-FileStore-Backup-and-Restore/
│   ├── 01-myapp1-kube-manifests/
│   │   ├── 01-filestore-pvc.yaml
│   │   ├── 02-write-to-filestore-pod.yaml
│   │   ├── 03-myapp1-deployment.yaml
│   │   └── 04-loadBalancer-service.yaml
│   ├── 02-volume-backup-kube-manifests/
│   │   ├── 01-VolumeSnapshotClass.yaml
│   │   └── 02-VolumeSnapshot.yaml
│   ├── 03-volume-restore-myapp2-kube-manifests/
│   │   ├── 01-filestore-pvc.yaml
│   │   ├── 02-myapp2-deployment.yaml
│   │   └── 03-myapp2-loadBalancer-service.yaml
│   └── README.md
├── 34-GKE-Ingress-Basics/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 02-ingress-basic.yaml
├── 35-GKE-Ingress-Context-Path-Routing/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 04-Ingress-ContextPath-Based-Routing.yaml
├── 36-GKE-Ingress-Custom-Health-Check/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 04-Ingress-Custom-Healthcheck.yaml
├── 37-Google-Cloud-Domains/
│   └── README.md
├── 38-GKE-Ingress-ExternalIP/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 04-Ingress-external-ip.yaml
├── 39-GKE-Ingress-Google-Managed-SSL/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-SSL.yaml
│       └── 05-Managed-Certificate.yaml
├── 40-GKE-Ingress-Google-Managed-SSL-Redirect/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-SSL.yaml
│       ├── 05-Managed-Certificate.yaml
│       └── 06-frontendconfig.yaml
├── 41-GKE-Workload-Identity/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-wid-demo-pod-without-sa.yaml
│       └── 02-wid-demo-pod-with-sa.yaml
├── 42-GKE-ExternalDNS-Install/
│   └── README.md
├── 43-GKE-ExternalDNS-Ingress-Demo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 02-ingress-external-dns.yaml
├── 44-GKE-ExternalDNS-Service-Demo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 45-GKE-Ingress-NameBasedVhost-Routing/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-NameBasedVHost-Routing.yaml
│       ├── 05-Managed-Certificate.yaml
│       └── 06-frontendconfig.yaml
├── 46-GKE-Ingress-SSL-Policy/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-NameBasedVHost-Routing.yaml
│       ├── 05-Managed-Certificate.yaml
│       └── 06-frontendconfig.yaml
├── 47-GKE-Ingress-with-Identity-Aware-Proxy/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-NameBasedVHost-Routing.yaml
│       ├── 05-Managed-Certificate.yaml
│       ├── 06-frontendconfig.yaml
│       └── 07-backendconfig.yaml
├── 48-GKE-Ingress-SelfSigned-SSL/
│   ├── README.md
│   ├── SSL-SelfSigned-Certs/
│   │   ├── app1-ingress.crt
│   │   ├── app1-ingress.csr
│   │   ├── app1-ingress.key
│   │   ├── app2-ingress.crt
│   │   ├── app2-ingress.csr
│   │   ├── app2-ingress.key
│   │   ├── app3-ingress.crt
│   │   ├── app3-ingress.csr
│   │   └── app3-ingress.key
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-ingress-self-signed-ssl.yaml
│       └── 05-frontendconfig.yaml
├── 49-GKE-Ingress-Preshared-SSL/
│   ├── README.md
│   ├── SSL-SelfSigned-Certs/
│   │   ├── app1-ingress.crt
│   │   ├── app1-ingress.csr
│   │   ├── app1-ingress.key
│   │   ├── app2-ingress.crt
│   │   ├── app2-ingress.csr
│   │   ├── app2-ingress.key
│   │   ├── app3-ingress.crt
│   │   ├── app3-ingress.csr
│   │   └── app3-ingress.key
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-ingress-preshared-ssl.yaml
│       └── 05-frontendconfig.yaml
├── 50-GKE-Ingress-Cloud-CDN/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-NodePort-service.yaml
│       ├── 03-ingress.yaml
│       └── 04-backendconfig.yaml
├── 51-GKE-Ingress-ClientIP-Affinity/
│   ├── 01-kube-manifests-with-clientip-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   ├── 02-kube-manifests-without-clientip-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   └── README.md
├── 52-GKE-Ingress-Cookie-Affinity/
│   ├── 01-kube-manifests-with-cookie-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   ├── 02-kube-manifests-without-cookie-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   └── README.md
├── 53-GKE-Ingress-HealthCheck-with-backendConfig/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-NodePort-service.yaml
│       ├── 03-ingress.yaml
│       └── 04-backendconfig.yaml
├── 54-GKE-Ingress-InternalLB/
│   ├── 01-kube-manifests/
│   │   ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│   │   ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│   │   ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│   │   └── 04-Ingress-internal-lb.yaml
│   ├── 02-kube-manifests-curl/
│   │   └── 01-curl-pod.yml
│   └── README.md
├── 55-GKE-Ingress-Cloud-Armor/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-NodePort-service.yaml
│       ├── 03-ingress.yaml
│       └── 04-backendconfig.yaml
├── 56-GKE-Artifact-Registry/
│   ├── 01-Docker-Image/
│   │   ├── Dockerfile
│   │   └── index.html
│   ├── 02-kube-manifests/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadBalancer-service.yaml
│   └── README.md
├── 57-GKE-Continuous-Integration/
│   ├── 01-SSH-Keys/
│   │   ├── id_gcp_cloud_source
│   │   └── id_gcp_cloud_source.pub
│   ├── 02-Docker-Image/
│   │   ├── Dockerfile
│   │   └── index.html
│   ├── 03-cloudbuild-yaml/
│   │   └── cloudbuild.yaml
│   ├── 04-kube-manifests/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadBalancer-service.yaml
│   └── README.md
├── 58-GKE-Continuous-Delivery-with-CloudBuild/
│   ├── 01-myapp1-k8s-repo/
│   │   └── cloudbuild-delivery.yaml
│   ├── 02-Source-Writer-IAM-Role/
│   │   └── myapp1-k8s-repo-policy.yaml
│   ├── 03-myapp1-app-repo/
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── cloudbuild-trigger-cd.yaml
│   │   ├── cloudbuild.yaml
│   │   ├── index.html
│   │   └── kubernetes.yaml.tpl
│   └── README.md
├── 59-Kubernetes-liveness-probe/
│   ├── 01-liveness-probe-linux-command/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 07-kubernetes-secret.yaml
│   ├── 02-liveness-probe-HTTP-Request/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 07-kubernetes-secret.yaml
│   ├── 03-liveness-probe-TCP-Request/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 07-kubernetes-secret.yaml
│   └── README.md
├── 60-Kubernetes-Startup-Probe/
│   ├── README.md
│   └── kube-manifests-startup-probe/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│       └── 07-kubernetes-secret.yaml
├── 61-Kubernetes-Readiness-Probe/
│   ├── README.md
│   └── kube-manifests-readiness-probe/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│       └── 07-kubernetes-secret.yaml
├── 62-Kubernetes-Requests-and-Limits/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 63-GKE-Cluster-Autoscaling/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 64-Kubernetes-Namespaces/
│   ├── 01-kube-manifests-imperative/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadbalancer-service.yaml
│   ├── 02-kube-manifests-declarative/
│   │   ├── 00-kubernetes-namespace.yaml
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadbalancer-service.yaml
│   └── README.md
├── 65-Kubernetes-Namespaces-ResourceQuota/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-namespace.yaml
│       ├── 02-kubernetes-resourcequota.yaml
│       ├── 03-kubernetes-deployment.yaml
│       └── 04-kubernetes-loadbalancer-service.yaml
├── 66-Kubernetes-Namespaces-LimitRange/
│   ├── 01-kube-manifests-LimitRange-defaults/
│   │   ├── 01-kubernetes-namespace.yaml
│   │   ├── 02-kubernetes-resourcequota-limitrange.yaml
│   │   ├── 03-kubernetes-deployment.yaml
│   │   └── 04-kubernetes-loadbalancer-service.yaml
│   ├── 02-kube-manifests-LimitRange-MinMax/
│   │   ├── 01-kubernetes-namespace.yaml
│   │   ├── 02-kubernetes-resourcequota-limitrange.yaml
│   │   ├── 03-kubernetes-deployment.yaml
│   │   └── 04-kubernetes-loadbalancer-service.yaml
│   └── README.md
├── 67-GKE-Horizontal-Pod-Autoscaler/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-cip-service.yaml
│       └── 03-kubernetes-hpa.yaml
├── 68-GKE-AutoPilot-Cluster/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 69-Access-To-Multiple-Clusters/
│   └── README.md
├── README.md
├── course-presentation/
│   └── Google-Kubernetes-Engine-GKE-GCP-v3R.pptx
└── git-deploy.sh

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
# Local .terraform directories
**/.terraform/*
.DS_Store

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log

# Ignore any .tfvars files that are generated automatically for each Terraform run. Most
# .tfvars files are managed as part of configuration and so should be included in
# version control.
#
# example.tfvars

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*


================================================
FILE: 01-Create-GCP-Account/README.md
================================================
---
title: Create GCP Cloud Account
description: Learn to create GCP Cloud Account
---

## Step-01: Introduction
- Create GCP Cloud Account

## Step-02: Create a Google Account
- We should have a google account (gmail account) before creating GCP cloud Account
- Create one Google Account if not having one.

## Step-03: Create GCP Account
- Go to https://cloud.google.com
- Follow presentation slides to create the GCP Account

## Step-04: Create Budget Alerts
- Go to Billing and Create Budget Alerts


================================================
FILE: 02-Create-GKE-Cluster/README.md
================================================
---
title: GCP Google Kubernetes Engine - Create GKE Cluster
description: Learn to create Google Kubernetes Engine GKE Cluster
---

## Step-01: Introduction
- Create GKE Standard GKE Cluster 
- Configure Google CloudShell to access GKE Cluster
- Deploy simple Kubernetes Deployment and Kubernetes Load Balancer Service and Test 
- Clean-Up

## Step-02: Create Standard GKE Cluster 
- Go to Kubernetes Engine -> Clusters -> CREATE
- Select **GKE Standard -> CONFIGURE**
- **Cluster Basics**
  - **Name:** standard-public-cluster-1
  - **Location type:** Regional
  - **Region:** us-central1
  - **Specify default node locations:** us-central1-a, us-central1-b, us-central1-c
  - **Release Channel**
    - **Release Channel:** Rapid Channel
    - **Version:** LATEST AVAILABLE ON THAT DAY
  - REST ALL LEAVE TO DEFAULTS
- **NODE POOLS: default-pool**
- **Node pool details**
  - **Name:** default-pool
  - **Number of Nodes (per zone):** 1
  - **Node Pool Upgrade Strategy:** Surge Upgrade
- **Nodes: Configure node settings** 
  - **Image type:** Container-Optimized OS
  - **Machine configuration**
    - **GENERAL PURPOSE SERIES:** E2
    - **Machine Type:** e2-small
  - **Boot disk type:** Balanced persistent disk
  - **Boot disk size(GB):** 20
  - **Boot disk encryption:** Google-managed encryption key (default)
  - **Enable Node on Spot VMs:** CHECKED
- **Node Networking:** LEAVE TO DEFAULTS  
- **Node Security:** 
  - **Access scopes:** Allow default access (LEAVE TO DEFAULT)
  - REST ALL REVIEW AND LEAVE TO DEFAULTS
- **Node Metadata:** REVIEW AND LEAVE TO DEFAULTS
- **CLUSTER** 
  - **Automation:** REVIEW AND LEAVE TO DEFAULTS
  - **Networking:** REVIEW AND LEAVE TO DEFAULTS
    - **CHECK THIS BOX: Enable Dataplane V2** CHECK IT - IN FUTURE VERSIONS IT WILL BE BY DEFAULT ENABLED
  - **Security:** REVIEW AND LEAVE TO DEFAULTS
    - **CHECK THIS BOX: Enable Workload Identity** CHECK IT - IN FUTURE VERSIONS IT WILL BE BY DEFAULT ENABLED
  - **Metadata:** REVIEW AND LEAVE TO DEFAULTS
  - **Features:** REVIEW AND LEAVE TO DEFAULTS
- CLICK ON **CREATE**

## Step-03: Verify Cluster Details
- Go to Kubernetes Engine -> Clusters -> **standard-public-cluster-1**
- Review
  - Details Tab
  - Nodes Tab
    - Review same nodes **Compute Engine**
  - Storage Tab
    - Review Storage Classes
  - Logs Tab
    - Review Cluster Logs
    - Review Cluster Logs **Filter By Severity**

## Step-04: Verify Additional Features in GKE on a High-Level
### Step-04-01: Verify Workloads Tab
- Go to Kubernetes Engine -> Clusters -> **standard-public-cluster-1**
- Workloads -> **SHOW SYSTEM WORKLOADS**

### Step-04-02: Verify Services & Ingress
- Go to Kubernetes Engine -> Clusters -> **standard-public-cluster-1**
- Services & Ingress -> **SHOW SYSTEM OBJECTS**

### Step-04-03: Verify Applications, Secrets & ConfigMaps
- Go to Kubernetes Engine -> Clusters -> **standard-public-cluster-1**
- Applications
- Secrets & ConfigMaps

### Step-04-04: Verify Storage
- Go to Kubernetes Engine -> Clusters -> **standard-public-cluster-1**
- Storage Classes
  - premium-rwo
  - standard
  - standard-rwo

### Step-04-05: Verify the below
1. Object Browser
2. Migrate to Containers
3. Backup for GKE
4. Config Management
5. Protect

## Step-05: Google CloudShell: Connect to GKE Cluster using kubectl
- [kubectl Authentication in GKE](https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke)
```t
# Verify gke-gcloud-auth-plugin Installation (if not installed, install it)
gke-gcloud-auth-plugin --version 

# Install Kubectl authentication plugin for GKE
sudo apt-get install google-cloud-sdk-gke-gcloud-auth-plugin

# Verify gke-gcloud-auth-plugin Installation
gke-gcloud-auth-plugin --version 

# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT-NAME>
gcloud container clusters get-credentials standard-public-cluster-1 --region us-central1 --project kdaida123

# Run kubectl with the new plugin prior to the release of v1.25
vi ~/.bashrc
USE_GKE_GCLOUD_AUTH_PLUGIN=True

# Reload the environment value
source ~/.bashrc

# Check if Environment variable loaded in Terminal
echo $USE_GKE_GCLOUD_AUTH_PLUGIN

# Verify kubectl version
kubectl version --short

# Install kubectl (if not installed)
gcloud components install kubectl

# Configure kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --zone <ZONE> --project <PROJECT-ID>
gcloud container clusters get-credentials standard-cluster-1 --zone us-central1-c --project kdaida123

# Verify Kubernetes Worker Nodes
kubectl get nodes

# Verify System Pod in kube-system Namespace
kubectl -n kube-system get pods

# Verify kubeconfig file
cat $HOME/.kube/config
kubectl config view
```

## Step-06: Review Sample Application: 01-kubernetes-deployment.yaml
- **Folder:** kube-manifests
```yaml
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 2
  selector:
    matchLabels:
      app: myapp1
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          image: us-docker.pkg.dev/google-samples/containers/gke/hello-app:1.0
          ports: 
            - containerPort: 8080  
    
```

## Step-07: Review Sample Application: 02-kubernetes-loadbalancer-service.yaml
- **Folder:** kube-manifests
```yaml
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-lb-service
spec:
  type: LoadBalancer # ClusterIp, # NodePort
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 8080 # Container Port
```

## Step-08: Upload Sample App to Google CloudShell
```t
# Upload Sample App to Google CloudShell
Go to Google CloudShell -> 3 Dots -> Upload -> Folder -> google-kubernetes-engine

# Change Directory
cd google-kubernetes-engine/02-Create-GKE-Cluster

# Verify folder uploaded
ls kube-manifests/

# Verify Files
cat kube-manifests/01-kubernetes-deployment.yaml
cat kube-manifests/02-kubernetes-loadbalancer-service.yaml
```

## Step-09: Deploy Sample Application and Verify
```t
# Change Directory
cd google-kubernetes-engine/02-Create-GKE-Cluster

# Deploy Sample App using kubectl
kubectl apply -f kube-manifests/

# List Deployments
kubectl get deploy

# List Pods
kubectl get pod

# List Services
kubectl get svc

# Access Sample Application
http://<EXTERNAL-IP>
```

## Step-10: Verify Workloads in GKE Dashboard
- Go to GCP Console -> Kubernetes Engine -> Workloads
- Click on  **myapp1-deployment**
- Review all tabs

## Step-11: Verify Services in GKE Dashboard
- Go to GCP Console -> Kubernetes Engine -> Services & Ingress
- Click on **myapp1-lb-service**
- Review all tabs

## Step-12: Verify Load Balancer
- Go to GCP Console -> Networking Services -> Load Balancing
- Review all tabs

## Step-13: Clean-Up
- Go to Google Cloud Shell
```t
# Change Directory
cd google-kubernetes-engine/02-Create-GKE-Cluster

# Delete Kubernetes Deployment and Service
kubectl delete -f kube-manifests/

# List Deployments
kubectl get deploy

# List Pods
kubectl get pod

# List Services
kubectl get svc
```





================================================
FILE: 02-Create-GKE-Cluster/kube-manifests/01-kubernetes-deployment.yaml
================================================
# Deployment: runs two replicas of Google's public hello-app sample image.
# Paired with 02-kubernetes-loadbalancer-service.yaml, which exposes these Pods.
apiVersion: apps/v1
kind: Deployment 
metadata: # Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 2  # Desired number of identical Pods
  selector:
    matchLabels:
      app: myapp1  # Must match the Pod template labels below
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs; selected by the Deployment and the Service
    spec:
      containers: # List
        - name: myapp1-container
          image: us-docker.pkg.dev/google-samples/containers/gke/hello-app:1.0
          ports: 
            - containerPort: 8080  # hello-app listens on 8080
    

================================================
FILE: 02-Create-GKE-Cluster/kube-manifests/02-kubernetes-loadbalancer-service.yaml
================================================
# Service: exposes the myapp1 Deployment externally via a cloud Load Balancer.
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-lb-service
spec:
  type: LoadBalancer # Other common types: ClusterIP, NodePort
  selector:
    app: myapp1  # Routes traffic to Pods labeled app=myapp1
  ports: 
    - name: http
      port: 80 # Service Port (external)
      targetPort: 8080 # Container Port (hello-app listens on 8080)


================================================
FILE: 03-gcloud-cli-install-macos/README.md
================================================
---
title: gcloud cli install on macOS
description: Learn to install gcloud cli on MacOS
---

## Step-01: Introduction
- Install gcloud CLI on MacOS
- Configure kubeconfig for kubectl on your local terminal
- Verify if you are able to reach GKE Cluster using kubectl from your local terminal

## Step-02: Install gcloud cli on MacOS
- [Install gcloud cli](https://cloud.google.com/sdk/docs/install-sdk#mac)
```t
# Verify Python Version (Supported versions are Python 3 (3.5 to 3.8, 3.7 recommended)
python3 -V

# Determine your machine hardware 
uname -m

# Create Folder
mkdir gcloud-cli-software

# Download gcloud cli based on machine hardware 
## Important Note: Download the latest version available on that respective day
Download Link: https://cloud.google.com/sdk/docs/install-sdk#mac

## As on today the below is the latest version (x86_64 bit)
curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-418.0.0-darwin-x86_64.tar.gz

# Unzip binary
ls -lrta
tar -zxf google-cloud-cli-418.0.0-darwin-x86_64.tar.gz

# Run the install script with screen reader mode on:
./google-cloud-sdk/install.sh --screen-reader=true
```

## Step-03: Verify gcloud cli version
```t
# Open new terminal
AS PATH is updated, open new terminal

# gcloud cli version
gcloud version

## Sample Output
Kalyans-Mac-mini:gcloud-cli-software kalyanreddy$ gcloud version
Google Cloud SDK 418.0.0
bq 2.0.85
core 2023.02.13
gcloud-crc32c 1.0.0
gsutil 5.20
Kalyans-Mac-mini:gcloud-cli-software kalyanreddy$
```

## Step-04: Initialize gcloud CLI in local Terminal 
```t
# Initialize gcloud CLI
./google-cloud-sdk/bin/gcloud init

# gcloud config Configurations Commands (For Reference)
gcloud config list
gcloud config configurations list
gcloud config configurations activate
gcloud config configurations create
gcloud config configurations delete
gcloud config configurations describe
gcloud config configurations rename
```

## Step-05: Verify gke-gcloud-auth-plugin 
```t
# Change Directory
cd gcloud-cli-software

## Important Note about gke-gcloud-auth-plugin: 
1. Kubernetes clients require an authentication plugin, gke-gcloud-auth-plugin, which uses the Client-go Credential Plugins framework to provide authentication tokens to communicate with GKE clusters

# Verify if gke-gcloud-auth-plugin installed
gke-gcloud-auth-plugin --version

# Install gke-gcloud-auth-plugin
gcloud components install gke-gcloud-auth-plugin

# Verify if gke-gcloud-auth-plugin installed
gke-gcloud-auth-plugin --version
```

## Step-06: Remove any existing kubectl clients
```t
# Verify kubectl version
kubectl version --short
which kubectl 
Observation: 
1. We are not using kubectl from gcloud CLI and we need to fix that. 

# Removing existing kubectl
which kubectl
rm /usr/local/bin/kubectl
```

## Step-07: Install kubectl client from gcloud CLI
```t
# List gcloud components
gcloud components list

## SAMPLE OUTPUT
Status: Not Installed
Name: kubectl
ID: kubectl
Size: < 1 MiB

# Install kubectl client
gcloud components install kubectl

# Verify kubectl version
OPEN NEW TERMINAL AS PATH IS UPDATED
kubectl version --short
which kubectl
```


## Step-08: Fix kubectl client version equal to GKE Cluster version
- **Important Note:** You must use a kubectl version that is within one minor version difference of your Kubernetes cluster control plane. 
- For example, a 1.24 kubectl client works with Kubernetes Cluster 1.23, 1.24 and 1.25 clusters.
- As our GKE cluster version is 1.26, we will also upgrade our kubectl to 1.26
```t
# Verify kubectl version
OPEN NEW TERMINAL AS PATH IS UPDATED
kubectl version --short
which kubectl

# Change Directory 
cd /Users/kalyanreddy/Documents/course-repos/gcloud-cli-software/google-cloud-sdk/bin/

# List files
ls -lrta

# Backup existing kubectl
cp kubectl kubectl_bkup_1.24

# Copy latest kubectl
cp kubectl.1.26 kubectl

# Verify kubectl version
kubectl version --short
which kubectl
```

## Step-09: Configure kubeconfig for kubectl in local desktop terminal
```t
# Clean-Up kubeconfig file (if any older configs exists)
rm $HOME/.kube/config

# Configure kubeconfig for kubectl 
gcloud container clusters get-credentials <GKE-CLUSTER-NAME> --region <REGION> --project <PROJECT>
gcloud container clusters get-credentials standard-public-cluster-1 --region us-central1 --project kdaida123

# Verify Kubernetes Worker Nodes
kubectl get nodes


# Verify System Pod in kube-system Namespace
kubectl -n kube-system get pods

# Verify kubeconfig file
cat $HOME/.kube/config
kubectl config view
```



## References
- [gcloud CLI](https://cloud.google.com/sdk/gcloud)
- [Install the Google Cloud CLI](https://cloud.google.com/sdk/docs/install-sdk#mac)

================================================
FILE: 04-gcloud-cli-install-windowsos/README.md
================================================
---
title: gcloud cli install on WindowsOS
description: Learn to install gcloud cli on WindowsOS
---

## Step-01: Introduction
- Install gcloud CLI on WindowsOS
- Configure kubeconfig for kubectl on your local terminal
- Verify if you are able to reach GKE Cluster using kubectl from your local terminal
- Fix kubectl version to match with GKE Cluster Server Version. 

## Step-02: Install gcloud cli on WindowsOS
- [Install gcloud cli on WindowsOS](https://cloud.google.com/sdk/docs/install-sdk#windows)
```t
## Important Note: Download the latest version available on that respective day
Download Link: https://cloud.google.com/sdk/docs/install-sdk#windows

## Run the Installer
GoogleCloudSDKInstaller.exe
```

## Step-03: Verify gcloud cli version
```t
# gcloud cli version
gcloud version
```

## Step-04: Initialize gcloud CLI in local Terminal 
```t
# Initialize gcloud CLI
gcloud init

# List accounts whose credentials are stored on the local system:
gcloud auth list

# List the properties in your active gcloud CLI configuration
gcloud config list

# View information about your gcloud CLI installation and the active configuration
gcloud info

# gcloud config Configurations Commands (For Reference)
gcloud config list
gcloud config configurations list
gcloud config configurations activate
gcloud config configurations create
gcloud config configurations delete
gcloud config configurations describe
gcloud config configurations rename
```

## Step-05: Verify gke-gcloud-auth-plugin 
```t
## Important Note about gke-gcloud-auth-plugin: 
1. Kubernetes clients require an authentication plugin, gke-gcloud-auth-plugin, which uses the Client-go Credential Plugins framework to provide authentication tokens to communicate with GKE clusters

# Verify if gke-gcloud-auth-plugin installed
gke-gcloud-auth-plugin --version

# Install gke-gcloud-auth-plugin
gcloud components install gke-gcloud-auth-plugin

# Verify if gke-gcloud-auth-plugin installed
gke-gcloud-auth-plugin --version
```

## Step-06: Remove any existing kubectl clients
```t
# Verify kubectl version
kubectl version --output=yaml
Observation: 
1. If any kubectl exists before installing it from gcloud then uninstall it.
2. Usually if docker is installed on our desktop, its equivalent kubectl package mostly will be installed and set on PATH. If exists please remove it.  

```

## Step-07: Install kubectl client from gcloud CLI
```t
# List gcloud components
gcloud components list

## SAMPLE OUTPUT
Status: Not Installed
Name: kubectl
ID: kubectl
Size: < 1 MiB

# Install kubectl client
gcloud components install kubectl

# Verify kubectl version
kubectl version --output=yaml
```


## Step-08: Configure kubeconfig for kubectl in local desktop terminal
```t
# Verify kubeconfig file
kubectl config view

# Configure kubeconfig for kubectl 
gcloud container clusters get-credentials <GKE-CLUSTER-NAME> --region <REGION> --project <PROJECT>
gcloud container clusters get-credentials standard-public-cluster-1 --region us-central1 --project kdaida123

# Verify kubeconfig file
kubectl config view

# Verify Kubernetes Worker Nodes
kubectl get nodes
Observation: 
1. It should throw warning at the end about huge difference in kubectl client version and GKE Cluster Server Version
2. Lets fix that in next step. 

```
## Step-09: Fix kubectl client version equal to GKE Cluster version
- **Important Note:** You must use a kubectl version that is within one minor version difference of your Kubernetes cluster control plane. 
- For example, a 1.24 kubectl client works with Kubernetes Cluster 1.23, 1.24 and 1.25 clusters.
- As our GKE cluster version is 1.26, we will also upgrade our kubectl to 1.26
```t
# Verify kubectl version
kubectl version --output=yaml

# Change Directory 
Go to Google Cloud SDK "bin" directory

# Backup existing kubectl
Backup "kubectl" to "kubectl_bkup_1.24"

# Copy latest kubectl
COPY  "kubectl.1.26" as "kubectl"

# Verify kubectl version
kubectl version --output=yaml
```

## References
- [gcloud CLI](https://cloud.google.com/sdk/gcloud)
- [Install the Google Cloud CLI](https://cloud.google.com/sdk/docs/install-sdk#windows)

================================================
FILE: 05-Docker-For-Beginners/README.md
================================================
---
title: Docker Fundamentals
description: Learn Docker Fundamentals
---

## Docker Fundamentals
- For Docker Fundamentals github repository, please click on below link
- https://github.com/stacksimplify/docker-fundamentals



================================================
FILE: 06-kubectl-imperative-k8s-pods/README.md
================================================
---
title: Kubernetes PODs
description: Learn about Kubernetes Pods
---

## Step-01: PODs Introduction
- What is a POD ?
- What is a Multi-Container POD?

## Step-02: PODs Demo
### Step-02-01: Get Worker Nodes Status
- Verify if kubernetes worker nodes are ready. 
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT-NAME>
gcloud container clusters get-credentials standard-public-cluster-1 --region us-central1 --project kdaida123

# Get Worker Node Status
kubectl get nodes

# Get Worker Node Status with wide option
kubectl get nodes -o wide
```

### Step-02-02:  Create a Pod
- Create a Pod
```t
# Template
kubectl run <desired-pod-name> --image <Container-Image> 

# Replace Pod Name, Container Image
kubectl run my-first-pod --image stacksimplify/kubenginx:1.0.0
```  

### Step-02-03: List Pods
- Get the list of pods
```t
# List Pods
kubectl get pods

# Alias name for pods is po
kubectl get po
```

### Step-02-04: List Pods with wide option
- List pods with wide option which also provide Node information on which Pod is running
```t
# List Pods with Wide Option
kubectl get pods -o wide
```

### Step-02-05: What happened in the background when the above command was run?
1. Kubernetes created a pod
2. Pulled the docker image from docker hub
3. Created the container in the pod
4. Started the container present in the pod

### Step-02-06: Describe Pod
- Describe the POD, primarily required during troubleshooting. 
- Events shown will be of a great help during troubleshooting. 
```t
# To get list of pod names
kubectl get pods

# Describe the Pod
kubectl describe pod <Pod-Name>
kubectl describe pod my-first-pod 
Observation:
1. Review Events - thats the key for troubleshooting, understanding what happened
```

### Step-02-07: Access Application
- Currently we can access this application only inside worker nodes. 
- To access it externally, we need to create a **NodePort or Load Balancer Service**. 
- **Services** is one very very important concept in Kubernetes. 

### Step-02-08: Delete Pod
```t
# To get list of pod names
kubectl get pods

# Delete Pod
kubectl delete pod <Pod-Name>
kubectl delete pod my-first-pod
```

## Step-03: Load Balancer Service Introduction
- What are Services in k8s?
- What is a Load Balancer Service?
- How it works?

## Step-04: Demo - Expose Pod with a Service
- Expose pod with a service (Load Balancer Service) to access the application externally (from internet)
- **Ports**
  - **port:** Port on which node port service listens in Kubernetes cluster internally
  - **targetPort:** We define container port here on which our application is running.
- Verify the following before LB Service creation (none of these should exist yet)
  - Google Cloud Load Balancer for the GKE Cluster
    - Frontend configuration
    - Backend services
  - External IP address
```t
# Create  a Pod
kubectl run <desired-pod-name> --image <Container-Image> 
kubectl run my-first-pod --image stacksimplify/kubenginx:1.0.0 

# Expose Pod as a Service
kubectl expose pod <Pod-Name>  --type=LoadBalancer --port=80 --name=<Service-Name>
kubectl expose pod my-first-pod  --type=LoadBalancer --port=80 --name=my-first-service

# Get Service Info
kubectl get service
kubectl get svc
Observation:
1. Initially External-IP will show as pending and slowly it will get the external-ip assigned and displayed.
2. It will take 2 to 3 minutes to get the external-ip listed

# Describe Service
kubectl describe service my-first-service

# Access Application
http://<External-IP-from-get-service-output>
curl http://<External-IP-from-get-service-output>
```
- Verify the following after LB Service creation
- Google Load Balancer created, verify it. 
  - Verify Backends 
  - Verify Frontends
- Verify **Workloads and Services** on Google GKE Dashboard GCP Console


## Step-05: Interact with a Pod
### Step-05-01: Verify Pod Logs
```t
# Get Pod Name
kubectl get po

# Dump Pod logs
kubectl logs <pod-name>
kubectl logs my-first-pod

# Stream pod logs with -f option and access application to see logs
kubectl logs <pod-name>
kubectl logs -f my-first-pod
```
- **Important Notes**
- Refer below link and search for **Interacting with running Pods** for additional log options
- Troubleshooting skills are very important. So please go through all logging options available and master them.
- **Reference:** https://kubernetes.io/docs/reference/kubectl/cheatsheet/

### Step-05-02: Connect to a Container in POD and execute command
```t
# Connect to Nginx Container in a POD
kubectl exec -it <pod-name> -- /bin/bash
kubectl exec -it my-first-pod -- /bin/bash

# Execute some commands in Nginx container
ls
cd /usr/share/nginx/html
cat index.html
exit
```
### Step-05-03: Running individual commands in a Container
```t
# Template
kubectl exec -it <pod-name> -- <COMMAND>

# Sample Commands
kubectl exec -it my-first-pod -- env
kubectl exec -it my-first-pod -- ls
kubectl exec -it my-first-pod -- cat /usr/share/nginx/html/index.html
```

## Step-06: Get YAML Output of Pod & Service
### Get YAML Output
```t
# Get pod definition YAML output
kubectl get pod my-first-pod -o yaml   

# Get service definition YAML output
kubectl get service my-first-service -o yaml   
```

## Step-07: Clean-Up
```t
# Get all Objects in default namespace
kubectl get all

# Delete Services
kubectl delete svc my-first-service

# Delete Pod
kubectl delete pod my-first-pod

# Get all Objects in default namespace
kubectl get all
```


## LOGS - More Options

```t
# Return snapshot logs from pod nginx with only one container
kubectl logs nginx

# Return snapshot of previous terminated ruby container logs from pod web-1
kubectl logs -p -c ruby web-1

# Begin streaming the logs of the ruby container in pod web-1
kubectl logs -f -c ruby web-1

# Display only the most recent 20 lines of output in pod nginx
kubectl logs --tail=20 nginx

# Show all logs from pod nginx written in the last hour
kubectl logs --since=1h nginx
```


================================================
FILE: 07-kubectl-declarative-k8s-ReplicaSets/README.md
================================================
---
title: Kubernetes ReplicaSets
description: Learn about Kubernetes ReplicaSets
---

## Step-01: Introduction to ReplicaSets
- What are ReplicaSets?
- What is the advantage of using ReplicaSets?

## Step-02: Create ReplicaSet

### Step-02-01: Create ReplicaSet
- Create ReplicaSet
```t
# Kubernetes ReplicaSet
kubectl create -f replicaset-demo.yml
```
- **replicaset-demo.yml**
```yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: my-helloworld-rs
  labels:
    app: my-helloworld
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-helloworld
  template:
    metadata:
      labels:
        app: my-helloworld
    spec:
      containers:
      - name: my-helloworld-app
        image: stacksimplify/kube-helloworld:1.0.0
```

### Step-02-02: List ReplicaSets
- Get list of ReplicaSets
```t
# List ReplicaSets
kubectl get replicaset
kubectl get rs
```

### Step-02-03: Describe ReplicaSet
- Describe the newly created ReplicaSet
```t
# Describe ReplicaSet
kubectl describe rs/<replicaset-name>

kubectl describe rs/my-helloworld-rs
[or]
kubectl describe rs my-helloworld-rs
```

### Step-02-04: List of Pods
- Get list of Pods
```t
# Get list of Pods
kubectl get pods
kubectl describe pod <pod-name>

# Get list of Pods with Pod IP and Node in which it is running
kubectl get pods -o wide
```

### Step-02-05: Verify the Owner of the Pod
- Verify the owner reference of the pod.
- Verify under **"name"** tag under **"ownerReferences"**. We will find the replicaset name to which this pod belongs to. 
```t
# List Pod with Output as YAML
kubectl get pods <pod-name> -o yaml
kubectl get pods my-helloworld-rs-c8rrj -o yaml 
```

## Step-03: Expose ReplicaSet as a Service
- Expose ReplicaSet with a service (Load Balancer Service) to access the application externally (from internet)
```t
# Expose ReplicaSet as a Service
kubectl expose rs <ReplicaSet-Name>  --type=LoadBalancer --port=80 --target-port=8080 --name=<Service-Name-To-Be-Created>
kubectl expose rs my-helloworld-rs  --type=LoadBalancer --port=80 --target-port=8080 --name=my-helloworld-rs-service

# List Services
kubectl get service
kubectl get svc
```
- **Access the Application using External or Public IP**
```t
# Access Application
http://<External-IP-from-get-service-output>/hello
curl http://<External-IP-from-get-service-output>/hello

# Observation
1. Each time we access the application, request will be sent to different pod and pods id will be displayed for us. 
```

## Step-04: Test Replicaset Reliability or High Availability 
- Test how the high availability or reliability concept is achieved automatically in Kubernetes
- Whenever a POD is accidentally terminated due to some application issue, the ReplicaSet should auto-create that Pod to maintain the desired number of Replicas configured, to achieve High Availability.
```t
# To get Pod Name
kubectl get pods

# Delete the Pod
kubectl delete pod <Pod-Name>

# Verify the new pod got created automatically
kubectl get pods   (Verify Age and name of new pod)
``` 

## Step-05: Test ReplicaSet Scalability feature 
- Test how scalability is going to seamless & quick
- Update the **replicas** field in **replicaset-demo.yml** from 3 to 6.
```yaml
# Before change
spec:
  replicas: 3

# After change
spec:
  replicas: 6
```
- Update the ReplicaSet
```t
# Apply latest changes to ReplicaSet
kubectl replace -f replicaset-demo.yml

# Verify if new pods got created
kubectl get pods -o wide
```

## Step-06: Delete ReplicaSet & Service
### Step-06-01: Delete ReplicaSet
```t
# Delete ReplicaSet
kubectl delete rs <ReplicaSet-Name>

# Sample Commands
kubectl delete rs/my-helloworld-rs
[or]
kubectl delete rs my-helloworld-rs

# Verify if ReplicaSet got deleted
kubectl get rs
```

### Step-06-02: Delete Service created for ReplicaSet
```t
# Delete Service
kubectl delete svc <service-name>

# Sample Commands
kubectl delete svc my-helloworld-rs-service
[or]
kubectl delete svc/my-helloworld-rs-service

# Verify if Service got deleted
kubectl get svc
```


================================================
FILE: 07-kubectl-declarative-k8s-ReplicaSets/replicaset-demo.yml
================================================
# ReplicaSet that keeps a fixed number of identical hello-world Pods running.
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: my-helloworld-rs
  labels:
    app: my-helloworld
spec:
  replicas: 3  # Desired number of Pods; the ReplicaSet recreates any that are deleted
  selector:
    # Must match the Pod template labels below so the ReplicaSet owns those Pods
    matchLabels:
      app: my-helloworld
  template:
    # Pod template used to create each replica
    metadata:
      labels:
        app: my-helloworld  # Matched by the selector above (and by Services exposing this app)
    spec:
      containers:
      - name: my-helloworld-app
        image: stacksimplify/kube-helloworld:1.0.0  # Hello-world app image; exposed later via a Service with targetPort 8080


================================================
FILE: 08-kubectl-imperative-k8s-deployment-CREATE/README.md
================================================
---
title: Kubernetes - Deployment
description: Learn and Implement Kubernetes Deployment
---

## Kubernetes Deployment - Topics
1. Create Deployment
2. Scale the Deployment
3. Expose Deployment as a Service
4. Update Deployment
5. Rollback Deployment
6. Rolling Restarts
7. Pause & Resume Deployments
8. Canary Deployments (Will be covered at Declarative section of Deployments)

## Step-01: Introduction to Deployments
- What is a Deployment?
- What all we can do using Deployment?
- Create a Deployment
- Scale the Deployment
- Expose the Deployment as a Service

## Step-02: Create Deployment
- Create Deployment to rollout a ReplicaSet
- Verify Deployment, ReplicaSet & Pods
- **Docker Image Location:** https://hub.docker.com/repository/docker/stacksimplify/kubenginx
```t
# Create Deployment
kubectl create deployment <Deployment-Name> --image=<Container-Image>
kubectl create deployment my-first-deployment --image=stacksimplify/kubenginx:1.0.0 

# Verify Deployment
kubectl get deployments
kubectl get deploy 

# Describe Deployment
kubectl describe deployment <deployment-name>
kubectl describe deployment my-first-deployment

# Verify ReplicaSet
kubectl get rs

# Verify Pod
kubectl get po
```
### Update Change-Cause for the Kubernetes Deployment - Rollout History
- **Observation:** We have the rollout history, so we can switch back to older revisions using revision history available to us
```t
# Verify Rollout History
kubectl rollout history deployment/my-first-deployment

# Update REVISION CHANGE-CAUSE for Kubernetes Deployment
kubectl annotate deployment/my-first-deployment kubernetes.io/change-cause="Deployment CREATE - App Version 1.0.0"

# Verify Rollout History
kubectl rollout history deployment/my-first-deployment
```
## Step-03: Scaling a Deployment
- Scale the deployment to increase the number of replicas (pods)
```t
# Scale Up the Deployment
kubectl scale --replicas=10 deployment/<Deployment-Name>
kubectl scale --replicas=10 deployment/my-first-deployment 

# Verify Deployment
kubectl get deploy

# Verify ReplicaSet
kubectl get rs

# Verify Pods
kubectl get po

# Scale Down the Deployment
kubectl scale --replicas=2 deployment/my-first-deployment 
kubectl get deploy
```

## Step-04: Expose Deployment as a Service
- Expose **Deployment** with a service (LoadBalancer Service) to access the application externally (from internet)
```t
# Expose Deployment as a Service
kubectl expose deployment <Deployment-Name>  --type=LoadBalancer --port=80 --target-port=80 --name=<Service-Name-To-Be-Created>
kubectl expose deployment my-first-deployment --type=LoadBalancer --port=80 --target-port=80 --name=my-first-deployment-service

# Get Service Info
kubectl get svc
```
- **Access the Application using Public IP**
```t
# Access Application
http://<External-IP-from-get-service-output>
curl http://<External-IP-from-get-service-output>
```

================================================
FILE: 09-kubectl-imperative-k8s-deployment-UPDATE/README.md
================================================
---
title: Kubernetes - Update Deployment
description: Learn and Implement Kubernetes Update Deployment
---
## Step-00: Introduction
- We can update deployments using two options
  - Set Image
  - Edit Deployment

## Step-01: Updating Application version V1 to V2 using "Set Image" Option
### Update Deployment
- **Observation:** Please Check the container name in `spec.container.name` yaml output and make a note of it and 
replace in `kubectl set image` command <Container-Name>
```t
# Get Container Name from current deployment
kubectl get deployment my-first-deployment -o yaml

# Update Deployment - SHOULD WORK NOW
kubectl set image deployment/<Deployment-Name> <Container-Name>=<Container-Image> 
kubectl set image deployment/my-first-deployment kubenginx=stacksimplify/kubenginx:2.0.0 
```

### Verify Rollout Status (Deployment Status)
- **Observation:** By default, rollout happens in a rolling update model, so no downtime.
```t
# Verify Rollout Status 
kubectl rollout status deployment/my-first-deployment

# Verify Deployment
kubectl get deploy
```
### Describe Deployment
- **Observation:**
  - Verify the Events and understand that Kubernetes by default performs a "Rolling Update" for new application releases. 
  - With that said, we will not have downtime for our application.
```t
# Describe Deployment
kubectl describe deployment my-first-deployment
```
### Verify ReplicaSet
- **Observation:** New ReplicaSet will be created for new version
```t
# Verify ReplicaSet
kubectl get rs
```

### Verify Pods
- **Observation:** Pod template hash label of new replicaset should be present for PODs letting us 
know these pods belong to new ReplicaSet.
```t
# List Pods
kubectl get po
```
### Access the Application using Public IP
- We should see `Application Version:V2` whenever we access the application in browser
```t
# Get Load Balancer IP
kubectl get svc

# Application URL
http://<External-IP-from-get-service-output>
```

### Update Change-Cause for the Kubernetes Deployment - Rollout History
- **Observation:** We have the rollout history, so we can switch back to older revisions using revision history available to us.  
```t
# Verify Rollout History
kubectl rollout history deployment/my-first-deployment

# Update REVISION CHANGE-CAUSE
kubectl annotate deployment/my-first-deployment kubernetes.io/change-cause="Deployment UPDATE - App Version 2.0.0 - SET IMAGE OPTION"

# Verify Rollout History
kubectl rollout history deployment/my-first-deployment
```


## Step-02: Update the Application from V2 to V3 using "Edit Deployment" Option
### Edit Deployment
```t
# Edit Deployment
kubectl edit deployment/<Deployment-Name> 
kubectl edit deployment/my-first-deployment 
```

```yaml
# Change From 2.0.0
    spec:
      containers:
      - image: stacksimplify/kubenginx:2.0.0

# Change To 3.0.0
    spec:
      containers:
      - image: stacksimplify/kubenginx:3.0.0
```


### Verify Rollout Status
- **Observation:** Rollout happens in a rolling update model, so no downtime.
```t
# Verify Rollout Status 
kubectl rollout status deployment/my-first-deployment

# Describe Deployment
kubectl describe deployment/my-first-deployment
```
### Verify Replicasets
- **Observation:**  We should see 3 ReplicaSets now, as we have updated our application to 3rd version 3.0.0
```t
# Verify ReplicaSet and Pods
kubectl get rs
kubectl get po
```

### Access the Application using Public IP
- We should see `Application Version:V3` whenever we access the application in browser
```t
# Get Load Balancer IP
kubectl get svc

# Application URL
http://<External-IP-from-get-service-output>
```

### Update Change-Cause for the Kubernetes Deployment - Rollout History
- **Observation:** We have the rollout history, so we can switch back to older revisions using revision history available to us. 
```t
# Verify Rollout History
kubectl rollout history deployment/my-first-deployment

# Update REVISION CHANGE-CAUSE
kubectl annotate deployment/my-first-deployment kubernetes.io/change-cause="Deployment UPDATE - App Version 3.0.0 - EDIT DEPLOYMENT OPTION"

# Verify Rollout History
kubectl rollout history deployment/my-first-deployment
```

================================================
FILE: 10-kubectl-imperative-k8s-deployment-ROLLBACK/README.md
================================================
---
title: Kubernetes - Rollback Deployment
description: Learn and Implement Kubernetes Rollback Deployment
---

## Step-00: Introduction
- We can rollback a deployment in two ways.
  - Previous Version
  - Specific Version

## Step-01: Rollback a Deployment to previous version

### Check the Rollout History of a Deployment
```t
# List Deployment Rollout History
kubectl rollout history deployment/<Deployment-Name>
kubectl rollout history deployment/my-first-deployment  
```

### Verify changes in each revision
- **Observation:** Review the "Annotations" and "Image" tags for clear understanding about changes.
```t
# List Deployment History with revision information
kubectl rollout history deployment/my-first-deployment --revision=1
kubectl rollout history deployment/my-first-deployment --revision=2
kubectl rollout history deployment/my-first-deployment --revision=3
```


### Rollback to previous version
- **Observation:** If we rollback, it will go back to revision-2 and its number increases to revision-4
```t
# Undo Deployment
kubectl rollout undo deployment/my-first-deployment

# List Deployment Rollout History
kubectl rollout history deployment/my-first-deployment  
```

### Verify Deployment, Pods, ReplicaSets
```t
# Verify Deployment, Pods, ReplicaSets
kubectl get deploy
kubectl get rs
kubectl get po
kubectl describe deploy my-first-deployment
```

### Access the Application using Public IP
- We should see `Application Version:V2` whenever we access the application in browser
```t
# Get Load Balancer IP
kubectl get svc

# Application URL
http://<External-IP-from-get-service-output>
```


## Step-02: Rollback to specific revision
### Check the Rollout History of a Deployment
```t
# List Deployment Rollout History
kubectl rollout history deployment/<Deployment-Name>
kubectl rollout history deployment/my-first-deployment 
```
### Rollback to specific revision
```t
# Rollback Deployment to Specific Revision
kubectl rollout undo deployment/my-first-deployment --to-revision=3
```

### List Deployment History
- **Observation:** If we rollback to revision 3, it will go back to revision-3 and its number increases to revision-5 in rollout history
```t
# List Deployment Rollout History
kubectl rollout history deployment/my-first-deployment
```


### Access the Application using Public IP
- We should see `Application Version:V3` whenever we access the application in browser
```t
# Get Load Balancer IP
kubectl get svc

# Application URL
http://<Load-Balancer-IP>
```

## Step-03: Rolling Restarts of Application
- Rolling restarts will kill the existing pods and recreate new pods in a rolling fashion. 
```t
# Rolling Restarts
kubectl rollout restart deployment/<Deployment-Name>
kubectl rollout restart deployment/my-first-deployment

# Get list of Pods
kubectl get po
```

================================================
FILE: 11-kubectl-imperative-k8s-deployment-PAUSE-RESUME/README.md
================================================
---
title: Kubernetes - Pause & Resume Deployments
description: Implement Kubernetes - Pause & Resume Deployments
---
## Step-00: Introduction
- Why do we need Pausing & Resuming Deployments?
  - If we want to make multiple changes to our Deployment, we can pause the deployment make all changes and resume it. 
- We are going to update our Application Version from **V3 to V4** as part of learning "Pause and Resume Deployments"  

## Step-01: Pausing & Resuming Deployments
### Check current State of Deployment & Application
 ```t
# Check the Rollout History of a Deployment
kubectl rollout history deployment/my-first-deployment  
Observation: Make a note of last version number

# Get list of ReplicaSets
kubectl get rs
Observation: Make a note of number of replicaSets present.

# Access the Application 
http://<External-IP-from-get-service-output>
Observation: Make a note of application version
```

### Pause Deployment and Two Changes
```t
# Pause the Deployment
kubectl rollout pause deployment/<Deployment-Name>
kubectl rollout pause deployment/my-first-deployment

# Update Deployment - Application Version from V3 to V4
kubectl set image deployment/my-first-deployment kubenginx=stacksimplify/kubenginx:4.0.0 

# Check the Rollout History of a Deployment
kubectl rollout history deployment/my-first-deployment  
Observation: No new rollout should start; the rollout history should show the same versions as before, with the last version number matching the one we noted earlier.

# Get list of ReplicaSets
kubectl get rs
Observation: No new replicaSet created. We should have same number of replicaSets as earlier when we took note. 

# Make one more change: set limits to our container
kubectl set resources deployment/my-first-deployment -c=kubenginx --limits=cpu=20m,memory=30Mi
```
### Resume Deployment 
```t
# Resume the Deployment
kubectl rollout resume deployment/my-first-deployment

# Check the Rollout History of a Deployment
kubectl rollout history deployment/my-first-deployment  
Observation: You should see a new version got created

# Update REVISION CHANGE-CAUSE
kubectl annotate deployment/my-first-deployment kubernetes.io/change-cause="Deployment PAUSE RESUME Demo - App Version 4.0.0 "

# Check the Rollout History of a Deployment
kubectl rollout history deployment/my-first-deployment

# Get list of ReplicaSets
kubectl get rs
Observation: You should see new ReplicaSet.

# Get Load Balancer IP
kubectl get svc
```
### Access Application
```t
# Access the Application 
http://<External-IP-from-get-service-output>
Observation: You should see Application V4 version
```


## Step-02: Clean-Up
```t
# Delete Deployment
kubectl delete deployment my-first-deployment

# Delete Service
kubectl delete svc my-first-deployment-service

# Get all Objects from Kubernetes default namespace
kubectl get all
```

================================================
FILE: 12-kubectl-imperative-k8s-services/README.md
================================================
---
title: Kubernetes Services
description: Learn about Kubernetes ClusterIP and Load Balancer Services
---
## Step-01: Introduction to Services
- **Service Types**
  1. ClusterIP
  2. NodePort
  3. LoadBalancer
  4. ExternalName
  5. Ingress
- We are going to look in to ClusterIP and LoadBalancer Service in this section with a detailed example. 
- LoadBalancer Type is primarily for cloud providers and it will differ cloud to cloud, so we will do it accordingly (per cloud basis)
- ExternalName doesn't have Imperative commands and we need to write YAML definition for the same, so we will look in to it as and when it is required in our course. 

## Step-02: ClusterIP Service - Backend Application Setup
- Create a deployment for Backend Application (Spring Boot REST Application)
- Create a ClusterIP service for load balancing backend application. 
```t
# Create Deployment for Backend Rest App
kubectl create deployment my-backend-rest-app --image=stacksimplify/kube-helloworld:1.0.0 
kubectl get deploy

# Create ClusterIp Service for Backend Rest App
kubectl expose deployment my-backend-rest-app --port=8080 --target-port=8080 --name=my-backend-service
kubectl get svc
Observation: We don't need to specify "--type=ClusterIp" because default setting is to create ClusterIp Service. 
```
- **Important Note:** If the backend application port (Container Port: 8080) and Service Port (8080) are the same, we don't need to use **--target-port=8080**, but I have added it to avoid confusion. The same applies to the frontend application and service. 

- **Backend HelloWorld Application Source** [kube-helloworld](https://github.com/stacksimplify/kubernetes-fundamentals/tree/master/00-Docker-Images/02-kube-backend-helloworld-springboot/kube-helloworld)


## Step-03: LoadBalancer Service - Frontend Application Setup
- We have implemented **LoadBalancer Service** multiple times so far (in pods, replicasets and deployments), even then we are going to implement one more time to get a full architectural view in relation with ClusterIp service. 
- Create a deployment for Frontend Application (Nginx acting as Reverse Proxy)
- Create a LoadBalancer service for load balancing frontend application. 
- **Important Note:** In Nginx reverse proxy, ensure backend service name `my-backend-service` is updated when you are building the frontend container. We already built it and put ready for this demo (stacksimplify/kube-frontend-nginx:1.0.0)
- **Nginx Conf File**
```conf
server {
    listen       80;
    server_name  localhost;
    location / {
    # Update your backend application Kubernetes Cluster-IP Service name  and port below      
    # proxy_pass http://<Backend-ClusterIp-Service-Name>:<Port>;      
    proxy_pass http://my-backend-service:8080;
    }
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }
}
```
- **Docker Image Location:** https://hub.docker.com/repository/docker/stacksimplify/kube-frontend-nginx
- **Frontend Nginx Reverse Proxy Application Source** [kube-frontend-nginx](https://github.com/stacksimplify/kubernetes-fundamentals/tree/master/00-Docker-Images/03-kube-frontend-nginx)
```t
# Create Deployment for Frontend Nginx Proxy
kubectl create deployment my-frontend-nginx-app --image=stacksimplify/kube-frontend-nginx:1.0.0 
kubectl get deploy

# Create LoadBalancer Service for Frontend Nginx Proxy
kubectl expose deployment my-frontend-nginx-app  --type=LoadBalancer --port=80 --target-port=80 --name=my-frontend-service
kubectl get svc

# Get Load Balancer IP
kubectl get svc
http://<External-IP-from-get-service-output>/hello
curl http://<External-IP-from-get-service-output>/hello

# Scale backend with 10 replicas
kubectl scale --replicas=10 deployment/my-backend-rest-app

# Test again to view the backend service Load Balancing
http://<External-IP-from-get-service-output>/hello
curl http://<External-IP-from-get-service-output>/hello
```

## Step-04: Clean-Up Kubernetes Deployment and Services
```t
# List Services
kubectl get svc 

# Delete Services
kubectl delete service my-backend-service 
kubectl delete service my-frontend-service 

# List Deployments
kubectl get deploy

# Delete Deployments
kubectl delete deployment my-backend-rest-app   
kubectl delete deployment my-frontend-nginx-app
```


================================================
FILE: 13-YAML-Basics/README.md
================================================
---
title: YAML Basics for Kubernetes
description: Learn YAML Basics
---

## Step-01: Comments & Key Value Pairs
- Space after colon is mandatory to differentiate key and value
```yml
# Defining simple key value pairs
name: kalyan
age: 23
city: Hyderabad
```

## Step-02: Dictionary / Map
- Set of properties grouped together after an item
- Equal amount of blank space required for all the items under a dictionary
```yml
person:
  name: kalyan
  age: 23
  city: Hyderabad
```

## Step-03: Array / Lists
- Dash indicates an element of an array
```yml
person: # Dictionary
  name: kalyan
  age: 23
  city: Hyderabad
  hobbies: # List  
    - cycling
    - cooking
  hobbies: [cycling, cooking]   # List with a different notation  
```  

## Step-04: Multiple Lists
- Dash indicates an element of an array
```yml
person: # Dictionary
  name: kalyan
  age: 23
  city: Hyderabad
  hobbies: # List  
    - cycling
    - cooking
  hobbies: [cycling, cooking]   # List with a different notation  
  friends: # Multiple Lists
    - name: friend1
      age: 22
    - name: friend2
      age: 25            
```  


## Step-05: Sample Pod Template for Reference
```yml
apiVersion: v1 # String
kind: Pod  # String
metadata: # Dictionary
  name: myapp-pod
  labels: # Dictionary 
    app: myapp         
spec:
  containers: # List
    - name: myapp
      image: stacksimplify/kubenginx:1.0.0
      ports: # Multiple Lists
        - containerPort: 80
          protocol: "TCP"
        - containerPort: 81
          protocol: "TCP"
```






================================================
FILE: 13-YAML-Basics/sample-file.yml
================================================
# Simple Key value Pairs
person: # Dictionary
  name: kalyan
  age: 23
  city: Hyderabad
  hobbies: # List
    - cooking
    - cycling
  friends: # Multiple lists
    - name: friend1
      age: 23
    - name: friend2
      age: 22
--- # YAML Document Separator
apiVersion: v1 # String
kind: Pod  # String
metadata: # Dictionary
  name: myapp-pod
  labels: # Dictionary 
    app: myapp    
    tier: frontend     
spec:
  containers: # List
    - name: myapp
      image: stacksimplify/kubenginx:1.0.0
      ports: # Multiple Lists
        - containerPort: 80
          protocol: "TCP"
        - containerPort: 81
          protocol: "TCP"  


                     

  

================================================
FILE: 13-YAML-Basics/yaml-demo.yaml
================================================
# Simple Key Value Pairs
person: # Dictionary
  name: kalyan
  age: 23
  city: Hyderabad
  hobbies: # List 
    - cooking
    - cycling 
  hobbies: [cooking, cycling]   # Another Notation for Lists
  friends: # Multiple Lists
    - name: friend1
      age: 23
    - name: friend2
      age: 22   
--- # YAML Document Separator         
apiVersion: v1 # String
kind: Pod  # String
metadata: # Dictionary
  name: myapp-pod
  labels: # Dictionary 
    app: myapp         
spec:
  containers: # List
    - name: myapp
      image: stacksimplify/kubenginx:1.0.0
      ports: # Multiple Lists
        - containerPort: 80
          protocol: "TCP"
        - containerPort: 81
          protocol: "TCP"

================================================
FILE: 14-yaml-declarative-k8s-pods/README.md
================================================
---
title: Kubernetes Pods with YAML
description: Learn to write and test Kubernetes Pods with YAML
---

## Step-01: Kubernetes YAML Top level Objects
- Discuss about the k8s YAML top level objects
- **kube-base-definition.yml**
```yml
apiVersion:
kind:
metadata:
  
spec:
```
- [Kubernetes Reference](https://kubernetes.io/docs/reference/)
- [Kubernetes API Reference](https://kubernetes.io/docs/reference/kubernetes-api/)
-  [Pod API Objects Reference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#pod-v1-core)

## Step-02: Create Simple Pod Definition using YAML 
- We are going to create a very basic pod definition
- **01-pod-definition.yml**
```yaml
apiVersion: v1 # String
kind: Pod  # String
metadata: # Dictionary
  name: myapp-pod
  labels: # Dictionary 
    app: myapp         
spec:
  containers: # List
    - name: myapp
      image: stacksimplify/kubenginx:1.0.0
      ports:
        - containerPort: 80
```
- **Create Pod**
```t
# Change Directory
cd kube-manifests

# Create Pod
kubectl create -f 01-pod-definition.yml
[or]
kubectl apply -f 01-pod-definition.yml

# List Pods
kubectl get pods
```

## Step-03: Create a LoadBalancer Service
- **02-pod-LoadBalancer-service.yml**
```yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp-pod-loadbalancer-service  # Name of the Service
spec:
  type: LoadBalancer
  selector:
  # Loadbalance traffic across Pods matching this label selector
    app: myapp
  # Accept traffic sent to port 80    
  ports: 
    - name: http
      port: 80    # Service Port
      targetPort: 80 # Container Port
```
- **Create LoadBalancer Service for Pod**
```t
# Create Service
kubectl apply -f 02-pod-LoadBalancer-service.yml

# List Service
kubectl get svc

# Access Application
http://<Load-Balancer-Service-IP>
curl http://<Load-Balancer-Service-IP>
```

## Step-04: Clean-Up Kubernetes Pod and Service
```t
# Change Directory
cd kube-manifests

# Delete Pod
kubectl delete -f 01-pod-definition.yml

# Delete Service
kubectl delete -f  02-pod-LoadBalancer-service.yml
```


## API Object References
- [Kubernetes API Spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/)
- [Pod Spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#pod-v1-core)
- [Service Spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#service-v1-core)
- [Kubernetes API Reference](https://kubernetes.io/docs/reference/kubernetes-api/)




================================================
FILE: 14-yaml-declarative-k8s-pods/kube-base-definition.yml
================================================
apiVersion: 
kind: 
metadata:

spec:
    
# Types of Kubernetes Objects
# Pod, ReplicaSet, Deployment, Service and many more

# apiVersion: version of k8s objects
# kind: k8s objects 
# metadata: define name and labels for k8s objects
# spec: specification or real definition for k8s objects


================================================
FILE: 14-yaml-declarative-k8s-pods/kube-manifests/01-pod-definition.yml
================================================
# Pod: runs a single nginx-based demo container (kubenginx v1.0.0) serving on port 80.
apiVersion: v1 # String
kind: Pod # String
metadata: # Dictionary
  name: myapp-pod
  labels: # Dictionary
    app: myapp # Key Value Pairs - matched by the companion Service's label selector
spec:
  containers: # List
    - name: myapp
      image: stacksimplify/kubenginx:1.0.0
      ports: # List
        - containerPort: 80


    

================================================
FILE: 14-yaml-declarative-k8s-pods/kube-manifests/02-pod-LoadBalancer-service.yml
================================================
# Service (LoadBalancer): exposes the myapp Pod externally via a cloud load balancer.
apiVersion: v1
kind: Service
metadata:
  name: myapp-pod-loadbalancer-service
spec:
  type: LoadBalancer
  # Loadbalance traffic across Pods matching this label selector
  selector: 
    app: myapp 
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port

================================================
FILE: 15-yaml-declarative-k8s-replicasets/README.md
================================================
---
title: Kubernetes ReplicaSets with YAML
description: Learn to write and test Kubernetes ReplicaSets with YAML
---

## Step-01: Create ReplicaSet Definition
- **01-replicaset-definition.yml**
```yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: myapp2-rs
spec:
  replicas: 3 # 3 Pods should exist at all times.
  selector:  # Pods label should be defined in ReplicaSet label selector
    matchLabels:
      app: myapp2
  template:
    metadata:
      name: myapp2-pod
      labels:
        app: myapp2 # Atleast 1 Pod label should match with ReplicaSet Label Selector
    spec:
      containers:
      - name: myapp2
        image: stacksimplify/kubenginx:2.0.0
        ports:
          - containerPort: 80
```
## Step-02: Create ReplicaSet
- Create ReplicaSet with 3 Replicas
```t
# Create ReplicaSet
kubectl apply -f 01-replicaset-definition.yml

# List Replicasets
kubectl get rs
```
- Delete a pod
- ReplicaSet immediately creates the pod. 
```t
# List Pods
kubectl get pods

# Delete Pod
kubectl delete pod <Pod-Name>
```

## Step-03: Create LoadBalancer Service for ReplicaSet
```yaml
apiVersion: v1
kind: Service
metadata:
  name: replicaset-loadbalancer-service
spec:
  type: LoadBalancer 
  selector: 
    app: myapp2 
  ports: 
    - name: http
      port: 80
      targetPort: 80
     
```
- **Create LoadBalancer Service for ReplicaSet & Test**
```t
# Create LoadBalancer Service
kubectl apply -f 02-replicaset-LoadBalancer-servie.yml

# List LoadBalancer Service
kubectl get svc

# Access Application
http://<Load-Balancer-Service-IP>
```


## Step-04: Clean-Up Kubernetes ReplicaSet and Service
```t
# Change Directory
cd kube-manifests

# Delete ReplicaSet
kubectl delete -f 01-replicaset-definition.yml

# Delete Service
kubectl delete -f  02-replicaset-LoadBalancer-servie.yml
```


## API References
- [ReplicaSet](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#replicaset-v1-apps)

================================================
FILE: 15-yaml-declarative-k8s-replicasets/kube-base-definition.yml
================================================
apiVersion:
kind:
metadata:
  
spec:
    

================================================
FILE: 15-yaml-declarative-k8s-replicasets/kube-manifests/01-replicaset-definition.yml
================================================
# ReplicaSet: keeps 3 replicas of the kubenginx v2.0.0 Pod running at all times.
apiVersion: apps/v1
kind: ReplicaSet  
metadata: # Dictionary
  name: myapp2-rs
spec: # Dictionary
  replicas: 3 # Desired Pod count
  selector: 
    matchLabels: 
      app: myapp2 # Must match the Pod template labels below
  template:
    metadata: # Dictionary
      name: myapp2-pod
      labels:
        app: myapp2 # Key Value Pairs   
    spec:
      containers: # List
        - name: myapp2-container
          image: stacksimplify/kubenginx:2.0.0
          ports: 
            - containerPort: 80          

================================================
FILE: 15-yaml-declarative-k8s-replicasets/kube-manifests/02-replicaset-LoadBalancer-servie.yml
================================================
# Service (LoadBalancer): exposes the myapp2 ReplicaSet Pods via a cloud load balancer.
apiVersion: v1
kind: Service
metadata:
  name: replicaset-loadbalancer-service
spec:
  type: LoadBalancer
  # Loadbalance traffic across Pods matching this label selector
  selector: 
    app: myapp2 
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port

================================================
FILE: 16-yaml-declarative-k8s-deployments/README.md
================================================
---
title: Kubernetes Deployments with YAML
description: Learn to write and test Kubernetes Deployments with YAML
---

## Step-01: Copy templates from ReplicaSet
- Copy templates from ReplicaSet and change the `kind: Deployment` 
- Update Container Image version to `3.0.0`
- Change all names to Deployment
- Change all labels and selectors to `myapp3`

```t
# Change Directory
cd kube-manifests

# Create Deployment
kubectl apply -f 01-deployment-definition.yml
kubectl get deploy
kubectl get rs
kubectl get po

# Create LoadBalancer Service
kubectl apply -f 02-deployment-LoadBalancer-servie.yml

# List Service
kubectl get svc

# Get Public IP
kubectl get nodes -o wide

# Access Application
http://<Load-Balancer-Service-IP>
```

## Step-02: Clean-Up Kubernetes Deployment and Service
```t
# Change Directory
cd kube-manifests

# Delete Deployment
kubectl delete -f 01-deployment-definition.yml

# Delete LoadBalancer Service
kubectl delete -f 02-deployment-LoadBalancer-servie.yml
```


## API References
- [Deployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#deployment-v1-apps)


================================================
FILE: 16-yaml-declarative-k8s-deployments/kube-base-definition.yml
================================================
apiVersion:
kind:
metadata:
  
spec:
    

================================================
FILE: 16-yaml-declarative-k8s-deployments/kube-manifests/01-deployment-definition.yml
================================================
# Deployment: manages 3 replicas of the kubenginx v3.0.0 Pod with rolling updates.
apiVersion: apps/v1
kind: Deployment  
metadata: # Dictionary
  name: myapp3-deployment
spec: # Dictionary
  replicas: 3 # Desired Pod count
  selector: 
    matchLabels: 
      app: myapp3 # Must match the Pod template labels below
  template:
    metadata: # Dictionary
      name: myapp3-pod
      labels:
        app: myapp3 # Key Value Pairs   
    spec:
      containers: # List
        - name: myapp3-container
          image: stacksimplify/kubenginx:3.0.0
          ports: 
            - containerPort: 80          

================================================
FILE: 16-yaml-declarative-k8s-deployments/kube-manifests/02-deployment-LoadBalancer-servie.yml
================================================
# Service (LoadBalancer): exposes the myapp3 Deployment Pods via a cloud load balancer.
apiVersion: v1
kind: Service 
metadata:
  name: deployment-loadbalancer-service
spec:
  type: LoadBalancer # Other options: ClusterIP, NodePort
  selector:
    app: myapp3 # Load balance across Pods with this label
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port

================================================
FILE: 17-yaml-declarative-k8s-services/README.md
================================================
---
title: Kubernetes Services with YAML
description: Learn to write and test Kubernetes Services with YAML
---

## Step-01: Introduction to Services
- We are going to look in to below two services in detail with a frontend and backend example
  - LoadBalancer Service
  - ClusterIP Service

## Step-02: Create Backend Deployment & Cluster IP Service
- Write the Deployment template for backend REST application.
- Write the Cluster IP service template for backend REST application.
- **Important Notes:** 
  - Name of Cluster IP service should be `name: my-backend-service` because  same is configured in frontend nginx reverse proxy `default.conf`. 
  - Test with different name and understand the issue we face
  - We have also discussed about in our  [Section-12](https://github.com/stacksimplify/google-kubernetes-engine/tree/main/12-kubectl-imperative-k8s-services)
```t
# Change Directory
cd kube-manifests

# Deploy Backend Kubernetes Deployment and ClusterIP Service 
kubectl get all
kubectl apply -f 01-backend-deployment.yml -f 02-backend-clusterip-service.yml
kubectl get all
```


## Step-03: Create Frontend Deployment & LoadBalancer Service
- Write the Deployment template for frontend Nginx Application
- Write the LoadBalancer service template for frontend Nginx Application
```t
# Change Directory
cd kube-manifests

# Deploy Frontend Kubernetes Deployment and LoadBalancer Service 
kubectl get all
kubectl apply -f 03-frontend-deployment.yml -f 04-frontend-LoadBalancer-service.yml
kubectl get all
```
- **Access REST Application**
```t
# Get Service IP
kubectl get svc

# Access REST Application 
http://<Load-Balancer-Service-IP>/hello
curl http://<Load-Balancer-Service-IP>/hello
```

## Step-04: Delete & Recreate Objects using kubectl apply
### Delete Objects (file by file)
```t
# Change Directory 
cd kube-manifests/

# Delete Objects File by file
kubectl delete -f 01-backend-deployment.yml -f 02-backend-clusterip-service.yml -f 03-frontend-deployment.yml -f 04-frontend-LoadBalancer-service.yml
kubectl get all
```
### Recreate Objects using YAML files in a folder
```t
# Change Directory 
cd 17-yaml-declarative-k8s-services/

# Recreate Objects by referencing a folder
kubectl apply -f kube-manifests/
kubectl get all
```

### Delete Objects using YAML files in folder
```t
# Change Directory 
cd 17-yaml-declarative-k8s-services/

# Delete Objects by just referencing a folder
kubectl delete -f kube-manifests/
kubectl get all
```


## Additional References - Use Label Selectors for get and delete
- [Labels](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively)
- [Labels-Selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)

================================================
FILE: 17-yaml-declarative-k8s-services/kube-base-definition.yml
================================================
apiVersion: 
kind: 
metadata:

spec:


================================================
FILE: 17-yaml-declarative-k8s-services/kube-manifests/01-backend-deployment.yml
================================================
# Deployment: 3 replicas of the Spring Boot hello-world REST backend (container port 8080).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend-restapp
  labels:
    app: backend-restapp
    tier: backend 
spec:
  replicas: 3
  selector:
    matchLabels:
      app: backend-restapp # Must match the Pod template labels below
  template:
    metadata:
      labels:
        app: backend-restapp
        tier: backend 
    spec: 
      containers:
        - name: backend-restapp
          image: stacksimplify/kube-helloworld:1.0.0
          ports:
            - containerPort: 8080        

================================================
FILE: 17-yaml-declarative-k8s-services/kube-manifests/02-backend-clusterip-service.yml
================================================
# Service (ClusterIP): cluster-internal endpoint for the backend REST app.
apiVersion: v1
kind: Service
metadata:
  name: my-backend-service ## VERY VERY IMPORTANT - NGINX PROXYPASS needs this name
  labels:
    app: backend-restapp
    tier: backend   
spec:
  #type: ClusterIP is the default Service type in k8s when "type" is omitted
  selector:
    app: backend-restapp
  ports:
    - name: http
      port: 8080 # ClusterIP Service Port
      targetPort: 8080 # Container Port


================================================
FILE: 17-yaml-declarative-k8s-services/kube-manifests/03-frontend-deployment.yml
================================================
# Deployment: 3 replicas of the Nginx reverse proxy that forwards /hello to my-backend-service.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: frontend-nginxapp
  labels:
    app: frontend-nginxapp
    tier: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend-nginxapp # Must match the Pod template labels below
  template: 
    metadata:
      labels: 
        app: frontend-nginxapp
        tier: frontend
    spec: 
      containers: 
        - name: frontend-nginxapp
          image: stacksimplify/kube-frontend-nginx:1.0.0
          ports:
            - containerPort: 80

================================================
FILE: 17-yaml-declarative-k8s-services/kube-manifests/04-frontend-LoadBalancer-service.yml
================================================
# Service (LoadBalancer): public entry point for the frontend Nginx proxy Pods.
apiVersion: v1
kind: Service 
metadata:
  name: frontend-nginxapp-loadbalancer-service
  labels:
    app: frontend-nginxapp
    tier: frontend  
spec:
  type: LoadBalancer # Other options: ClusterIP, NodePort
  selector:
    app: frontend-nginxapp
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port

================================================
FILE: 18-GKE-NodePort-Service/README.md
================================================
---
title: GCP Google Kubernetes Engine GKE NodePort Service
description: Implement GCP Google Kubernetes Engine GKE NodePort Service
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-public-cluster-1 --region us-central1 --project kdaida123

# List GKE Kubernetes Worker Nodes
kubectl get nodes

# List GKE Kubernetes Worker Nodes with -o wide option
kubectl get nodes -o wide
Observation: 
1. You should see External-IP Address (Public IP accesible via internet)
2. That is the key thing for testing the Kubernetes NodePort Service on GKE Cluster
```
## Step-01: Introduction
- Implement Kubernetes NodePort Service 

## Step-02: 01-kubernetes-deployment.yaml
```yaml
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 2
  selector:
    matchLabels:
      app: myapp1
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          image: stacksimplify/kubenginx:1.0.0
          ports: 
            - containerPort: 80      
```

## Step-03: 02-kubernetes-nodeport-service.yaml
- If you don't specify `nodePort: 30080` it will dynamically assign one port from range `30000-32767`
```yaml
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-nodeport-service
spec:
  type: NodePort # clusterIP, # NodePort, # LoadBalancer, # ExternalName
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port
      nodePort: 30080 # NodePort (Optional)(Node Port Range: 30000-32767)
```


## Step-04: Deploy Kubernetes Manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests

# List Deployments
kubectl get deploy

# List Pods
kubectl get po

# List Services
kubectl get svc
```

## Step-05: Access Application
```t
# List Kubernetes Worker Nodes with -o wide
kubectl get nodes -o wide
Observation: 
1. Make a note of any one Node External-IP (Public IP Address)

# Access Application
http://<NODE-EXTERNAL-IP>:<NodePort>
http://104.154.52.12:30080
Observation:
1. This should fail
```

## Step-06: Create Firewall Rule
```t
# Create Firewall Rule
gcloud compute firewall-rules create fw-rule-gke-node-port \
    --allow tcp:NODE_PORT

# Replace NODE_PORT
gcloud compute firewall-rules create fw-rule-gke-node-port \
    --allow tcp:30080   

# List Firewall Rules
gcloud compute firewall-rules list    
```

## Step-07:Access Application
```t
# List Kubernetes Worker Nodes with -o wide
kubectl get nodes -o wide
Observation: 
1. Make a note of any one Node External-IP (Public IP Address)

# Access Application
http://<NODE-EXTERNAL-IP>:<NodePort>
http://104.154.52.12:30080
Observation:
1. This should Pass
```



## Step-08: Clean-Up
```t
# Delete Kubernetes Resources
kubectl delete -f kube-manifests

# Delete NodePort Service Firewall Rule
gcloud compute firewall-rules delete fw-rule-gke-node-port

# List Firewall Rules
gcloud compute firewall-rules list 
```




================================================
FILE: 18-GKE-NodePort-Service/kube-manifests/01-kubernetes-deployment.yaml
================================================
# Deployment: 2 replicas of the kubenginx v1.0.0 demo app, exposed via the NodePort Service.
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 2
  selector:
    matchLabels:
      app: myapp1 # Must match the Pod template labels below
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          image: stacksimplify/kubenginx:1.0.0
          ports: 
            - containerPort: 80  
    

================================================
FILE: 18-GKE-NodePort-Service/kube-manifests/02-kubernetes-nodeport-service.yaml
================================================
# Service (NodePort): exposes myapp1 on port 30080 of every worker node's IP.
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-nodeport-service
spec:
  type: NodePort # ClusterIP, # NodePort, # LoadBalancer, # ExternalName
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port
      nodePort: 30080 # NodePort (Optional)(Default Node Port Range: 30000-32767)


================================================
FILE: 19-GKE-Headless-Service/01-kube-manifests/01-kubernetes-deployment.yaml
================================================
# Deployment: 4 replicas of Google's hello-app v2.0 (container port 8080), used to
# compare ClusterIP vs Headless Service DNS resolution.
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 4
  selector:
    matchLabels:
      app: myapp1 # Must match the Pod template labels below
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          #image: stacksimplify/kubenginx:1.0.0
          image: us-docker.pkg.dev/google-samples/containers/gke/hello-app:2.0
          ports: 
            - containerPort: 8080  
    

================================================
FILE: 19-GKE-Headless-Service/01-kube-manifests/02-kubernetes-clusterip-service.yaml
================================================
# Service (ClusterIP): standard in-cluster virtual IP in front of the myapp1 Pods.
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-cip-service
spec:
  type: ClusterIP # ClusterIP, # NodePort, # LoadBalancer, # ExternalName
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 8080 # Container Port



================================================
FILE: 19-GKE-Headless-Service/01-kube-manifests/03-kubernetes-headless-service.yaml
================================================
# Service (Headless): clusterIP: None means no virtual IP; DNS returns the Pod IPs directly.
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-headless-service
spec:
  #type: ClusterIP # ClusterIP, # NodePort, # LoadBalancer, # ExternalName
  clusterIP: None # Makes this a Headless Service
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 8080 # Service Port
      targetPort: 8080 # Container Port

## VERY IMPORTANT NOTE
# 1. When using Headless Service, we should use both the  "Service Port and Target Port" same. 
# 2. Headless Service directly sends traffic to Pod with Pod IP and Container Port. 
# 3. DNS resolution directly happens from headless service to Pod IP.





================================================
FILE: 19-GKE-Headless-Service/02-kube-manifests-curl/01-curl-pod.yml
================================================
# Utility Pod: curl image kept alive so we can "kubectl exec" in and test Service DNS names.
apiVersion: v1
kind: Pod
metadata:
  name: curl-pod
spec:
  containers:
  - name: curl
    image: curlimages/curl 
    command: [ "sleep", "600" ] # keep the container running; Pod exits after 10 minutes

================================================
FILE: 19-GKE-Headless-Service/README.md
================================================
---
title: GCP Google Kubernetes Engine GKE Headless Service
description: Implement GCP Google Kubernetes Engine GKE Headless Service
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-public-cluster-1 --region us-central1 --project kdaida123

# List GKE Kubernetes Worker Nodes
kubectl get nodes
```
## Step-01: Introduction
- Implement Kubernetes ClusterIP and Headless Service
- Understand Headless Service in detail

## Step-02: 01-kubernetes-deployment.yaml
```yaml
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 4
  selector:
    matchLabels:
      app: myapp1
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          #image: stacksimplify/kubenginx:1.0.0
          image: us-docker.pkg.dev/google-samples/containers/gke/hello-app:2.0
          ports: 
            - containerPort: 8080          
```

## Step-03: 02-kubernetes-clusterip-service.yaml
```yaml
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-cip-service
spec:
  type: ClusterIP # ClusterIP, # NodePort, # LoadBalancer, # ExternalName
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 8080 # Container Port
```

## Step-04: 03-kubernetes-headless-service.yaml
- Add `spec.clusterIP: None`
###  VERY IMPORTANT NOTE
1. When using Headless Service, we should use both the  "Service Port and Target Port" same. 
2. Headless Service directly sends traffic to Pod with Pod IP and Container Port. 
3. DNS resolution directly happens from headless service to Pod IP.

```yaml
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-headless-service
spec:
  #type: ClusterIP # ClusterIP, # NodePort, # LoadBalancer, # ExternalName
  clusterIP: None
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 8080 # Service Port
      targetPort: 8080 # Container Port

```

## Step-05: Deploy Kubernetes Manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f 01-kube-manifests

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods
kubectl get pods -o wide
Observation: make a note of Pod IP

# List Services
kubectl get svc
Observation: 
1. "CLUSTER-IP" will be "NONE" for Headless Service

## Sample 
Kalyans-Mac-mini:19-GKE-Headless-Service kalyanreddy$ kubectl get svc
NAME                      TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes                ClusterIP   10.24.0.1    <none>        443/TCP   135m
myapp1-cip-service        ClusterIP   10.24.2.34   <none>        80/TCP    4m9s
myapp1-headless-service   ClusterIP   None         <none>        8080/TCP  4m9s
Kalyans-Mac-mini:19-GKE-Headless-Service kalyanreddy$ 

```


## Step-06: Review Curl Kubernetes Manifests
- **Project Folder:** 02-kube-manifests-curl
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: curl-pod
spec:
  containers:
  - name: curl
    image: curlimages/curl 
    command: [ "sleep", "600" ]
```

## Step-07: Deploy Curl-pod and Verify ClusterIP and Headless Services
```t
# Deploy curl-pod
kubectl apply -f 02-kube-manifests-curl

# List Services
kubectl get svc

# GKE Cluster Kubernetes Service Full DNS Name format
<svc>.<ns>.svc.cluster.local

# Will open up a terminal session into the container
kubectl exec -it curl-pod -- sh

# ClusterIP Service: nslookup and curl Test
nslookup myapp1-cip-service.default.svc.cluster.local
curl myapp1-cip-service.default.svc.cluster.local

### ClusterIP Service nslookup Output
 $ nslookup myapp1-cip-service.default.svc.cluster.local
Server:		10.24.0.10
Address:	10.24.0.10:53

Name:	myapp1-cip-service.default.svc.cluster.local
Address: 10.24.2.34

# Headless Service: nslookup and curl Test
nslookup myapp1-headless-service.default.svc.cluster.local
curl myapp1-headless-service.default.svc.cluster.local:8080
Observation:
1. There is no specific IP for Headless Service
2. It will be directly dns resolved to Pod IP
3. That said we should use the same port as Container Port for Headless Service (VERY VERY IMPORTANT)

### Headless Service nslookup Output
$ nslookup myapp1-headless-service.default.svc.cluster.local
Server:		10.24.0.10
Address:	10.24.0.10:53

Name:	myapp1-headless-service.default.svc.cluster.local
Address: 10.20.0.25
Name:	myapp1-headless-service.default.svc.cluster.local
Address: 10.20.0.26
Name:	myapp1-headless-service.default.svc.cluster.local
Address: 10.20.1.28
Name:	myapp1-headless-service.default.svc.cluster.local
Address: 10.20.1.29
```

## Step-08: Clean-Up
```t
# Delete Kubernetes Resources
kubectl delete -f 01-kube-manifests

# Delete Kubernetes Resources - Curl Pod
kubectl delete -f 02-kube-manifests-curl
```




================================================
FILE: 20-GKE-Private-Cluster/README.md
================================================
---
title: GCP Google Kubernetes Engine GKE Private Cluster
description: Implement GCP Google Kubernetes Engine GKE Private Cluster
---

## Step-01: Introduction
- Create GKE Private Cluster
- Create Cloud NAT
- Deploy Sample App and Test
- Perform Authorized Network Tests
 
## Step-02: Create Standard GKE Cluster 
- Go to Kubernetes Engine -> Clusters -> CREATE
- Select **GKE Standard -> CONFIGURE**
- **Cluster Basics**
  - **Name:** standard-cluster-private-1
  - **Location type:** Regional
  - **Zones:** us-central1-a, us-central1-b, us-central1-c
  - **Release Channel**
    - **Release Channel:** Rapid Channel
    - **Version:** LATEST AVAILABLE ON THAT DAY
  - REST ALL LEAVE TO DEFAULTS
- **NODE POOLS: default-pool**
- **Node pool details**
  - **Name:** default-pool
  - **Number of Nodes (per Zone):** 1
- **Nodes: Configure node settings** 
  - **Image type:** Container-Optimized OS
  - **Machine configuration**
    - **GENERAL PURPOSE SERIES:** e2
    - **Machine Type:** e2-small
  - **Boot disk type:** standard persistent disk
  - **Boot disk size(GB):** 20
  - **Enable Nodes on Spot VMs:** CHECKED
- **Node Networking:** REVIEW AND LEAVE TO DEFAULTS    
- **Node Security:** 
  - **Access scopes:** Allow full access to all Cloud APIs
  - REST ALL REVIEW AND LEAVE TO DEFAULTS
- **Node Metadata:** REVIEW AND LEAVE TO DEFAULTS
- **CLUSTER** 
  - **Automation:** REVIEW AND LEAVE TO DEFAULTS
  - **Networking:** 
    - **Network Access:** Private Cluster
    - **Access control plane using its external IP address:** BY DEFAULT CHECKED
      - **Important Note:** Disabling this option locks down external access to the cluster control plane. There is still an external IP address used by Google for cluster management purposes, but the IP address is not accessible to anyone. This setting is  permanent
    - **Enable Control Plane Global Access:** CHECKED
    - **Control Plane IP Range:** 172.16.0.0/28
    - **CHECK THIS BOX: Enable Dataplane V2** CHECK IT - IN FUTURE VERSIONS IT WILL BE BY DEFAULT ENABLED
  - **Security:** REVIEW AND LEAVE TO DEFAULTS
    - **CHECK THIS BOX: Enable Workload Identity** IN FUTURE VERSIONS IT WILL BE BY DEFAULT ENABLED
  - **Metadata:** REVIEW AND LEAVE TO DEFAULTS
  - **Features:** REVIEW AND LEAVE TO DEFAULTS
    - **Enable Compute Engine Persistent Disk CSI Driver:** SHOULD BE CHECKED BY DEFAULT - VERIFY
    - **Enable File Store CSI Driver:** CHECKED 
- CLICK ON **CREATE**

## Step-03: Review kube-manifests: 01-kubernetes-deployment.yaml
```yaml
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 2
  selector:
    matchLabels:
      app: myapp1
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          image: stacksimplify/kubenginx:1.0.0
          ports: 
            - containerPort: 80      
          imagePullPolicy: Always            
```

## Step-04: Review kube-manifest: 02-kubernetes-loadbalancer-service.yaml
```yaml
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-lb-service
spec:
  type: LoadBalancer # ClusterIP, # NodePort
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port      
```

## Step-05: Deploy Kubernetes Manifests
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123

# Change Directory
cd 20-GKE-Private-Cluster

# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# Verify Pods 
kubectl get pods 
Observation: SHOULD FAIL - UNABLE TO DOWNLOAD DOCKER IMAGE FROM DOCKER HUB

# Describe Pod
kubectl describe pod <POD-NAME>

# Clean-Up
kubectl delete -f kube-manifests/
```

## Step-06: Create Cloud NAT
- Go to Network Services -> CREATE CLOUD NAT GATEWAY
- **Gateway Name:** gke-us-central1-default-cloudnat-gw
- **Select Cloud Router:** 
  - **Network:** default
  - **Region:** us-central1
  - **Cloud Router:** CREATE NEW ROUTER
    - **Name:** gke-us-central1-cloud-router
    - **Description:** GKE Cloud Router Region us-central1
    - **Network:** default (POPULATED by default)
    - **Region:** us-central1 (POPULATED by default)
    - **BGP Peer keepalive interval:** 20 seconds (LEAVE TO DEFAULT)
    - Click on **CREATE**
- **Cloud NAT Mapping:** LEAVE TO DEFAULTS
- **Destination (external):** LEAVE TO DEFAULTS
- **Stackdriver logging:**  LEAVE TO DEFAULTS
- **Port allocation:** 
  - CHECK **Enable Dynamic Port Allocation**
- **Timeouts for protocol connections:** LEAVE TO DEFAULTS
- CLICK on **CREATE**  

## Step-07: Deploy Kubernetes Manifests
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123

# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests

# Verify Pods 
kubectl get pods 
Observation: SHOULD BE ABLE TO DOWNLOAD THE DOCKER IMAGE

# List Services
kubectl get svc

# Access Application
http://<External-IP>

# Clean-Up
kubectl delete -f kube-manifests
```

## Step-08: Authorized Network Test1: My Network
- Goto -> standard-cluster-private-1 -> DETAILS -> NETWORKING
- Control plane authorized networks	-> EDIT
- **Enable control plane authorized networks:** CHECKED
- CLICK ON **ADD AUTHORIZED NETWORK**
- **NAME:** MY-NETWORK-1
- **NETWORK:** 10.10.10.0/24 
- Click on **DONE**
- Click on **SAVE CHANGES**
```t
# List Kubernetes Nodes
kubectl get nodes
Observation:
1. Access to GKE API Service from our local desktop kubectl cli is lost
2. Access to GKE API Service is now allowed only from "10.10.10.0/24" network
3. In short even though our GKE API Server has Internet enabled endpoint, its access is restricted to specific network of IPs

## Sample Output
Kalyan-Mac-mini:google-kubernetes-engine kalyan$ kubectl get nodes
Unable to connect to the server: dial tcp 34.70.169.161:443: i/o timeout
Kalyan-Mac-mini:google-kubernetes-engine kalyan$ 
```

## Step-09: Authorized Network Test2: My Desktop
- Go to link [whatismyip](https://www.whatismyip.com/) and get desktop public IP 
- Goto -> standard-cluster-private-1 -> DETAILS -> NETWORKING
- Control plane authorized networks	-> EDIT
- **Enable control plane authorized networks:** CHECKED
- CLICK ON **ADD AUTHORIZED NETWORK**
- **NAME:** MY-DESKTOP-1
- **NETWORK:** <YOUR-DESKTOP-PUBLIC-IP>/32 (the public IP obtained from whatismyip.com)
- Click on **DONE**
- Click on **SAVE CHANGES**
```t
# List Kubernetes Nodes
kubectl get nodes
Observation:
1. Access to GKE API Service from our local desktop kubectl cli should be success

## Sample Output
Kalyans-Mac-mini:google-kubernetes-engine kalyan$ kubectl get nodes
NAME                                                  STATUS   ROLES    AGE   VERSION
gke-standard-cluster-pri-default-pool-90b1f67b-4z71   Ready    <none>   55m   v1.24.3-gke.900
gke-standard-cluster-pri-default-pool-90b1f67b-6xn6   Ready    <none>   55m   v1.24.3-gke.900
gke-standard-cluster-pri-default-pool-90b1f67b-dggg   Ready    <none>   55m   v1.24.3-gke.900
Kalyans-Mac-mini:google-kubernetes-engine kalyan$ 
```

## Step-10: Authorized Network Test3: Delete both network rules (Roll back to old state)
- Goto -> standard-cluster-private-1 -> DETAILS -> NETWORKING
- Control plane authorized networks	-> EDIT
- **Enable control plane authorized networks:** UN-CHECKED
- AUTHORIZED NETWORKS -> DELETE -> MY-NETWORK-1, MY-DESKTOP-1
- Click on **SAVE CHANGES**
```t
# List Kubernetes Nodes
kubectl get nodes
Observation:
1. Access to GKE API Service from our local desktop kubectl cli should be success

## Sample Output
Kalyans-Mac-mini:google-kubernetes-engine kalyan$ kubectl get nodes
NAME                                                  STATUS   ROLES    AGE   VERSION
gke-standard-cluster-pri-default-pool-90b1f67b-4z71   Ready    <none>   55m   v1.24.3-gke.900
gke-standard-cluster-pri-default-pool-90b1f67b-6xn6   Ready    <none>   55m   v1.24.3-gke.900
gke-standard-cluster-pri-default-pool-90b1f67b-dggg   Ready    <none>   55m   v1.24.3-gke.900
Kalyans-Mac-mini:google-kubernetes-engine kalyan$ 
```

## Additional Reference
- [GKE Private Cluster with Terraform](https://github.com/GoogleCloudPlatform/gke-private-cluster-demo)

================================================
FILE: 20-GKE-Private-Cluster/kube-manifests/01-kubernetes-deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment 
metadata: #Dictionary
  name: myapp1-deployment
spec: # Dictionary
  replicas: 2
  selector:
    matchLabels:
      app: myapp1
  template:  
    metadata: # Dictionary
      name: myapp1-pod
      labels: # Dictionary
        app: myapp1  # Key value pairs
    spec:
      containers: # List
        - name: myapp1-container
          image: stacksimplify/kubenginx:1.0.0
          ports: 
            - containerPort: 80  
          imagePullPolicy: Always            
    

================================================
FILE: 20-GKE-Private-Cluster/kube-manifests/02-kubernetes-loadbalancer-service.yaml
================================================
apiVersion: v1
kind: Service 
metadata:
  name: myapp1-lb-service
spec:
  type: LoadBalancer # ClusterIp, # NodePort
  selector:
    app: myapp1
  ports: 
    - name: http
      port: 80 # Service Port
      targetPort: 80 # Container Port


================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/README.md
================================================
---
title: GKE Persistent Disks Existing StorageClass standard-rwo
description: Use existing storageclass standard-rwo in Kubernetes Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, ZONE, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster. 


## Step-01: Introduction
- Understand Kubernetes Objects
01. Kubernetes PersistentVolumeClaim
02. Kubernetes ConfigMap
03. Kubernetes Deployment
04. Kubernetes Volumes
05. Kubernetes Volume Mounts
06. Kubernetes Environment Variables
07. Kubernetes ClusterIP Service
08. Kubernetes Init Containers
09. Kubernetes Service of Type LoadBalancer
10. Kubernetes StorageClass 

- Use predefined Storage Class `standard-rwo`
- `standard-rwo` uses balanced persistent disk

## Step-02: List Kubernetes Storage Classes in GKE Cluster
```t
# List Storage Classes
kubectl get sc
```

## Step-03: 01-persistent-volume-claim.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources: 
    requests:
      storage: 4Gi

# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim
```

## Step-04: 02-UserManagement-ConfigMap.yaml
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 
```

## Step-05: 03-mysql-deployment.yaml
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate # terminates all the pods and replaces them with the new version.
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script


# VERY IMPORTANT POINTS ABOUT CONTAINERS AND POD VOLUMES: 
## 1. On-disk files in a container are ephemeral
## 2. One problem is the loss of files when a container crashes. 
## 3. Kubernetes Volumes solves above two as these volumes are configured to POD and not container. 
## Only they can be mounted in Container
## 4. Using Compute Engine Persistent Disk CSI Driver is a super generalized approach 
## for having Persistent Volumes for workloads in Kubernetes


## ENVIRONMENT VARIABLES
# 1. When you create a Pod, you can set environment variables for the 
# containers that run in the Pod. 
# 2. To set environment variables, include the env or envFrom field in 
# the configuration file.


## DEPLOYMENT STRATEGIES
# 1. Rolling deployment: This strategy  replaces pods running the old version of the application with the new version, one by one, without downtime to the cluster.
# 2. Recreate: This strategy terminates all the pods and replaces them with the new version.
# 3. Ramped slow rollout: This strategy  rolls out replicas of the new version, while in parallel, shutting down old replicas. 
# 4. Best-effort controlled rollout: This strategy  specifies a “max unavailable” parameter which indicates what percentage of existing pods can be unavailable during the upgrade, enabling the rollout to happen much more quickly.
# 5. Canary Deployment: This strategy  uses a progressive delivery approach, with one version of the application serving maximum users, and another, newer version serving a small set of test users. The test deployment is rolled out to more users if it is successful.
```

## Step-06: 04-mysql-clusterip-service.yaml
```yaml
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql 
  ports: 
    - port: 3306  
  clusterIP: None # This means we are going to use Pod IP    
```
## Step-07: 05-UserMgmtWebApp-Deployment.yaml
```yaml
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            - name: DB_HOSTNAME
              value: "mysql"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11"            
```
## Step-08: 06-UserMgmtWebApp-LoadBalancer-Service.yaml
```yaml
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port
```
## Step-09: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List Storage Classes
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5

# Sample Message for Successful Start of JVM
2022-06-20 09:34:32.519  INFO 1 --- [ost-startStop-1] .r.SpringbootSecurityInternalApplication : Started SpringbootSecurityInternalApplication in 14.891 seconds (JVM running for 23.283)
20-Jun-2022 09:34:32.593 INFO [localhost-startStop-1] org.apache.catalina.startup.HostConfig.deployWAR Deployment of web application archive /usr/local/tomcat/webapps/ROOT.war has finished in 21,016 ms
20-Jun-2022 09:34:32.623 INFO [main] org.apache.coyote.AbstractProtocol.start Starting ProtocolHandler ["http-apr-8080"]
20-Jun-2022 09:34:32.688 INFO [main] org.apache.coyote.AbstractProtocol.start Starting ProtocolHandler ["ajp-apr-8009"]
20-Jun-2022 09:34:32.713 INFO [main] org.apache.catalina.startup.Catalina.start Server startup in 21275 ms
```

## Step-10: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk

## Step-11: Verify Kubernetes Workloads, Services ConfigMaps on Kubernetes Engine Dashboard
```t
# Verify Workloads
Go to Kubernetes Engine -> Workloads
Observation:
1. You should see "mysql" and "usermgmt-webapp" deployments

# Verify Services
Go to Kubernetes Engine -> Services & Ingress
Observation:
1. You should see "mysql ClusterIP Service" and "usermgmt-webapp-lb-service"

# Verify ConfigMaps
Go to Kubernetes Engine -> Secrets & ConfigMaps
Observation: 
1. You should find the ConfigMap "usermanagement-dbcreation-script"

# Verify Persistent Volume Claim
Go to Kubernetes Engine -> Storage -> PERSISTENT VOLUME CLAIMS TAB
Observation: 
1. You should see PVC "mysql-pv-claim"

# Verify StorageClass
Go to Kubernetes Engine -> Storage -> STORAGE CLASSES TAB
Observation: 
1. You should see 3 Storage Classes out of which "standard-rwo" and "premium-rwo" are part of Compute Engine Persistent Disks (latest and greatest - Recommended for use)
2. Not recommended to use Storage Class with name "standard" (Older version)
```
## Step-12: Connect to MySQL Database
```t
# Template: Connect to MySQL Database using kubectl
kubectl run -it --rm --image=mysql:8.0 --restart=Never mysql-client -- mysql -h <Kubernetes-ClusterIP-Service> -u <USER_NAME> -p<PASSWORD>

# MySQL Client 8.0: Replace ClusterIP Service, Username and Password
kubectl run -it --rm --image=mysql:8.0 --restart=Never mysql-client -- mysql -h mysql -u root -pdbpassword11

mysql> show schemas;
mysql> use webappdb;
mysql> show tables;
mysql> select * from user;
mysql> exit
```


## Step-13: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

# Create New User
Username: admin102
Password: password102
First Name: fname102
Last Name: lname102
Email Address: admin102@stacksimplify.com
Social Security Address: ssn102

# Verify this user in MySQL DB
# Template: Connect to MySQL Database using kubectl
kubectl run -it --rm --image=mysql:8.0 --restart=Never mysql-client -- mysql -h <Kubernetes-ClusterIP-Service> -u <USER_NAME> -p<PASSWORD>

# MySQL Client 8.0: Replace ClusterIP Service, Username and Password
kubectl run -it --rm --image=mysql:8.0 --restart=Never mysql-client -- mysql -h mysql -u root -pdbpassword11

mysql> show schemas;
mysql> use webappdb;
mysql> show tables;
mysql> select * from user;
mysql> select * from user;
Observation:
1. You should find the newly created user from browser successfully created in MySQL DB.
2. In simple terms, we have done the following
a. Created MySQL k8s Deployment in GKE Cluster
b. Created Java WebApplication  k8s Deployment in GKE Cluster
c. Accessed Application using GKE Load Balancer IP using browser
d. Created a new user in this application and that user successfully stored in MySQL DB.
e. END TO END FLOW from Browser to DB using GKE Cluster we have seen.
```

## Step-14: Verify GCE PD CSI Driver Logging
- https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver
```t
# Cloud Logging Query 
 resource.type="k8s_container"
 resource.labels.project_id="PROJECT_ID"
  resource.labels.cluster_name="CLUSTER_NAME"
 resource.labels.namespace_name="kube-system"
 resource.labels.container_name="gce-pd-driver"

# Cloud Logging Query (Replace Values)
 resource.type="k8s_container"
 resource.labels.project_id="kdaida123"
 resource.labels.cluster_name="standard-cluster-private-1"
 resource.labels.namespace_name="kube-system"
 resource.labels.container_name="gce-pd-driver"
```

## Step-15: Clean-Up
```t
# Delete kube-manifests
kubectl delete -f kube-manifests/
```

## Reference
- [Using the Compute Engine persistent disk CSI Driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver)


## Additional-Data-01
1. It enables the automatic deployment and management of the persistent disk driver without having to manually set it up.
2. You can use customer-managed encryption keys (CMEKs). These keys are used to encrypt the data encryption keys that encrypt your data. 
3. You can use volume snapshots with the Compute Engine persistent disk CSI Driver. Volume snapshots let you create a copy of your volume at a specific point in time. You can use this copy to bring a volume back to a prior state or to provision a new volume.
4. Bug fixes and feature updates are rolled out independently from minor Kubernetes releases. This release schedule typically results in a faster release cadence.

## Additional-Data-02
- For Standard Clusters: The Compute Engine persistent disk CSI Driver is enabled by default on newly created clusters 
  - Linux clusters: GKE version 1.18.10-gke.2100 or later, or 1.19.3-gke.2100 or later.
  - Windows clusters: GKE version 1.22.6-gke.300 or later, or 1.23.2-gke.300 or later.
- For Autopilot clusters: The Compute Engine persistent disk CSI Driver is enabled by default and cannot be disabled or edited.

## Additional-Data-03
- GKE automatically installs the following StorageClasses:
  - standard-rwo:  using balanced persistent disk
  - premium-rwo: using SSD persistent disk
- For Autopilot clusters: The default StorageClass is standard-rwo, which uses the Compute Engine persistent disk CSI Driver. 
- For Standard clusters: The default StorageClass uses the Kubernetes in-tree gcePersistentDisk volume plugin.
```t
# You can find the name of your installed StorageClasses by running the following command:
kubectl get sc
or
kubectl get storageclass
```


================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/kube-manifests/01-persistent-volume-claim.yaml
================================================
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources: 
    requests:
      storage: 4Gi

# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. 
## We are going to use this in our MySQL k8s Deployment  

# 3. YAML Notation
## YAML Notation: |-: "strip": remove the line feed, remove the trailing blank lines.
## Additional YAML Notation Reference: https://stackoverflow.com/questions/3790454/how-do-i-break-a-string-in-yaml-over-multiple-lines

================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/kube-manifests/03-mysql-deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate # terminates all the pods and replaces them with the new version.
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script


# VERY IMPORTANT POINTS ABOUT CONTAINERS AND POD VOLUMES: 
## 1. On-disk files in a container are ephemeral
## 2. One problem is the loss of files when a container crashes. 
## 3. Kubernetes Volumes solves above two as these volumes are configured to POD and not container. 
## Only they can be mounted in Container
## 4. Using Compute Enginer Persistent Disk CSI Driver is a super generalized approach 
## for having Persistent Volumes for workloads in Kubernetes


## ENVIRONMENT VARIABLES
# 1. When you create a Pod, you can set environment variables for the 
# containers that run in the Pod. 
# 2. To set environment variables, include the env or envFrom field in 
# the configuration file.


## DEPLOYMENT STRATEGIES
# 1. Rolling deployment: This strategy  replaces pods running the old version of the application with the new version, one by one, without downtime to the cluster.
# 2. Recreate: This strategy terminates all the pods and replaces them with the new version.
# 3. Ramped slow rollout: This strategy  rolls out replicas of the new version, while in parallel, shutting down old replicas. 
# 4. Best-effort controlled rollout: This strategy  specifies a “max unavailable” parameter which indicates what percentage of existing pods can be unavailable during the upgrade, enabling the rollout to happen much more quickly.
# 5. Canary Deployment: This strategy  uses a progressive delivery approach, with one version of the application serving maximum users, and another, newer version serving a small set of test users. The test deployment is rolled out to more users if it is successful.

================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless Service exposing the MySQL Deployment inside the cluster.
# The webapp (and its init container) reaches the database via the
# stable DNS name "mysql" on port 3306.
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql # Routes traffic to Pods labeled app=mysql
  ports: 
    - port: 3306 # Default MySQL port
  clusterIP: None # Headless Service: no cluster VIP is allocated; DNS for "mysql" resolves directly to the backing Pod IP(s)

================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment.
# An init container blocks Pod startup until the MySQL Service answers,
# then the webapp container starts and connects using the DB_* environment variables.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        # Waits until TCP port 3306 on the "mysql" Service is reachable before the app container starts
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          ports: 
            - containerPort: 8080 # App listens on 8080; exposed externally via the LoadBalancer Service
          env:
            # Database connection settings consumed by the webapp
            - name: DB_HOSTNAME
              value: "mysql" # Headless Service name of the MySQL Deployment
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb" # Created by the ConfigMap init script on first MySQL start
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11" # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret

================================================
FILE: 21-GKE-PD-existing-SC-standard-rwo/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# Service of type LoadBalancer: provisions an external GCP load balancer
# and exposes the webapp on port 80, forwarding to the container's port 8080.
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp # Routes to the webapp Pods
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/README.md
================================================
---
title: GKE Persistent Disks Existing StorageClass premium-rwo
description: Use existing storageclass premium-rwo in Kubernetes Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster.

## Step-01: Introduction
- Understand Kubernetes Objects
01. Kubernetes PersistentVolumeClaim
02. Kubernetes ConfigMap
03. Kubernetes Deployment
04. Kubernetes Volumes
05. Kubernetes Volume Mounts
06. Kubernetes Environment Variables
07. Kubernetes ClusterIP Service
08. Kubernetes Init Containers
09. Kubernetes Service of Type LoadBalancer
10. Kubernetes StorageClass 

- Use the predefined Storage class `premium-rwo`
- By default, dynamically provisioned PersistentVolumes use the default StorageClass and are backed by `standard hard disks`. 
- If you need faster SSDs, you can use the `premium-rwo` storage class from the Compute Engine persistent disk CSI Driver to provision your volumes. 
- This can be done by setting the storageClassName field to `premium-rwo` in your PersistentVolumeClaim 
- `premium-rwo Storage Class` will provision `SSD Persistent Disk`

## Step-02: List Kubernetes Storage Classes in GKE Cluster
```t
# List Storage Classes
kubectl get sc
```

## Step-03: 01-persistent-volume-claim.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: premium-rwo 
  resources: 
    requests:
      storage: 4Gi
```

## Step-04: Other Kubernetes YAML Manifests
- Except for `01-persistent-volume-claim.yaml` (updated in Step-03 with `storageClassName: premium-rwo`), there are no changes to the other Kubernetes YAML Manifests
- They are the same as in the previous section
1. 01-persistent-volume-claim.yaml (updated)
2. 02-UserManagement-ConfigMap.yaml
3. 03-mysql-deployment.yaml
4. 04-mysql-clusterip-service.yaml
5. 05-UserMgmtWebApp-Deployment.yaml
6. 06-UserMgmtWebApp-LoadBalancer-Service.yaml

## Step-05: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List Storage Classes
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```

## Step-06: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk
- **Observation:** You should see the disk type as **SSD persistent disk**


## Step-07: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101
```

## Step-08: Clean-Up
```t
# Delete kube-manifests
kubectl delete -f kube-manifests/
```

## Reference
- [Using the Compute Engine persistent disk CSI Driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver)

================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/kube-manifests/01-persistent-volume-claim.yaml
================================================
# PersistentVolumeClaim for MySQL data.
# Uses the predefined "premium-rwo" StorageClass so the Compute Engine PD
# CSI driver dynamically provisions an SSD persistent disk.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce # Volume can be mounted read-write by a single node
  storageClassName: premium-rwo # Predefined GKE class backed by SSD persistent disks
  resources: 
    requests:
      storage: 4Gi

# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
# ConfigMap holding the database bootstrap SQL script.
# Mounted into the MySQL container at /docker-entrypoint-initdb.d, so it
# runs automatically when a fresh MySQL data directory is initialized.
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. (We are going to use this in our MySQL Deployment)


================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/kube-manifests/03-mysql-deployment.yaml
================================================
# MySQL Deployment backed by a PersistentVolumeClaim for durable data.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1 # Single instance; the backing disk's access mode is ReadWriteOnce
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate # Terminate the old Pod before starting the new one, so the RWO disk can re-attach
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11 # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            # MySQL data directory persisted on the PD-backed volume
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            # Bootstrap SQL from the ConfigMap; runs only when the data directory is fresh
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim # PVC defined in 01-persistent-volume-claim.yaml
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless Service exposing the MySQL Deployment inside the cluster
# via the stable DNS name "mysql" on port 3306.
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql # Routes traffic to Pods labeled app=mysql
  ports: 
    - port: 3306 # Default MySQL port
  clusterIP: None # Headless Service: no cluster VIP is allocated; DNS for "mysql" resolves directly to the backing Pod IP(s)

================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment.
# An init container blocks Pod startup until the MySQL Service answers,
# then the webapp container starts and connects using the DB_* environment variables.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        # Waits until TCP port 3306 on the "mysql" Service is reachable before the app container starts
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          ports: 
            - containerPort: 8080 # App listens on 8080; exposed externally via the LoadBalancer Service
          env:
            # Database connection settings consumed by the webapp
            - name: DB_HOSTNAME
              value: "mysql" # Headless Service name of the MySQL Deployment
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb" # Created by the ConfigMap init script on first MySQL start
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11" # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret

================================================
FILE: 22-GKE-PD-existing-SC-premium-rwo/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# Service of type LoadBalancer: provisions an external GCP load balancer
# and exposes the webapp on port 80, forwarding to the container's port 8080.
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp # Routes to the webapp Pods
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 23-GKE-PD-Custom-StorageClass/README.md
================================================
---
title: GKE Persistent Disks Custom StorageClass 
description: Use Custom storageclass to provision Google Disks for Kubernetes Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster.


## Step-01: Introduction
- **Feature-1:** Create a custom Kubernetes StorageClass instead of using a predefined one in the GKE Cluster: custom storage class `gke-pd-standard-rwo-sc`
- **Feature-2:** Test `allowVolumeExpansion: true` in Storage Class
- **Feature-3:** Use `reclaimPolicy: Retain` in Storage Class and Test it 

## Step-02: List Kubernetes Storage Classes in GKE Cluster
```t
# List Storage Classes
kubectl get sc
```

## Step-03: 00-storage-class.yaml
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata: 
  name: gke-pd-standard-rwo-sc
provisioner: pd.csi.storage.gke.io
volumeBindingMode: WaitForFirstConsumer 
allowVolumeExpansion: true
reclaimPolicy: Retain 
parameters:
  type: pd-balanced

# STORAGE CLASS 
# 1. A StorageClass provides a way for administrators 
# to describe the "classes" of storage they offer.
# 2. Here we are offering GCP PD Storage for GKE Cluster
```

## Step-04: 01-persistent-volume-claim.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: gke-pd-standard-rwo-sc
  resources: 
    requests:
      storage: 4Gi
```


## Step-05: Other Kubernetes YAML Manifests
- No changes to other Kubernetes YAML Manifests
- They are same as previous section
- 02-UserManagement-ConfigMap.yaml
- 03-mysql-deployment.yaml
- 04-mysql-clusterip-service.yaml
- 05-UserMgmtWebApp-Deployment.yaml
- 06-UserMgmtWebApp-LoadBalancer-Service.yaml

## Step-06: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List Storage Classes
kubectl get sc
Observation: 
1. You should find the new custom storage class object created with name as "gke-pd-standard-rwo-sc"

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```

## Step-07: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk
- **Observation:** You should see the disk type as **Balanced persistent disk**



## Step-08: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

# Create New User (Used for testing the `allowVolumeExpansion: true` Option)
Username: admin102
Password: password102
First Name: fname102
Last Name: lname102
Email Address: admin102@stacksimplify.com
Social Security Address: ssn102
```

## Step-09: Update 01-persistent-volume-claim.yaml from 4Gi to 8Gi
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: gke-pd-standard-rwo-sc
  resources: 
    requests:
      #storage: 4Gi # Comment out at Step-09
      storage: 8Gi # Uncomment at Step-09
```

## Step-10: Deploy updated kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List PVC
kubectl get pvc
Observation:
1. Wait for 2 to 3 mins and automatically CAPACITY value changes from 4Gi to 8Gi

# List PV
kubectl get pv
Observation:
1. Wait for 2 to 3 mins and automatically CAPACITY value changes from 4Gi to 8Gi

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101
Observation:
1. No impact to underlying MySQL Database data.
2. VolumeExpansion is seamless without impacting the real data. 
3. We should find the two users which are present before VolumeExpansion as-is.
```
## Step-11: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `8GB` Persistent Disk, as the 4GB disk has expanded to 8GB now.
- **Observation:** You should see the disk type as **Balanced persistent disk**


## Step-12: Verify reclaimPolicy: Retain
```t
# Delete kube-manifests
kubectl delete -f kube-manifests/

# List Storage Class
kubectl get sc
Observation:
1. Custom storage class deleted

# List PVC
kubectl get pvc
Observation:
1. PVC deleted

# List PV
kubectl get pv
Observation:
1. PV still present
2. PV STATUS will be "Released", i.e. not in use by anyone.
```

## Step-13: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `8GB` Persistent Disk.
- **Observation:** You should see the disk is still present even after all kube-manifests (storageclass, pvc) all deleted.
- This is due to we have used **reclaimPolicy: Retain** in Custom Storage Class


## Step-14: Clone Persistent Disk
- **Question:** Why we are cloning the disk ?
- **Answer:** In the next demo, we are going use the **pre-existing persistent disk** in our demo. For that purpose we are cloning it. 
- Go to Compute Engine -> Storage -> Disks
- Search for `8GB` Persistent Disk.
- Click on **Clone Disk**
- **Name:** preexisting-pd
- **Description:** preexisting-pd Demo with GKE
- **Location:** Single
- **Snapshot Schedule:** UNCHECK
- Click on **CREATE**

## Step-15: Delete Retained Persistent Disk from this Demo
- Go to Compute Engine -> Storage -> Disks
- Search for `8GB` Persistent Disk.
- **Disk Name:**  pvc-3f2c1daa-122d-4bdb-a7b6-b9943631cc14
- Click on **DELETE DISK**
```t
# List PV
kubectl get pv

# Delete  PV 
kubectl delete pv pvc-3f2c1daa-122d-4bdb-a7b6-b9943631cc14 

# List PV
kubectl get pv
```

## Step-16: Change PVC 8Gi to 4Gi: 01-persistent-volume-claim.yaml
- Change PVC 8Gi to 4Gi so that `kube-manifests` will be demo ready for students. 
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: gke-pd-standard-rwo-sc
  resources: 
    requests:
      storage: 4Gi # Comment out at Step-09
      #storage: 8Gi # Uncomment at Step-09
```


## Reference
- [Using the Compute Engine persistent disk CSI Driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver)


================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/00-storage-class.yaml
================================================
# Custom StorageClass for GCE Persistent Disks via the Compute Engine PD CSI driver.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata: 
  name: gke-pd-standard-rwo-sc
provisioner: pd.csi.storage.gke.io # Compute Engine persistent disk CSI driver
volumeBindingMode: WaitForFirstConsumer # Delay provisioning until a Pod uses the PVC, so the disk is created in that Pod's zone
allowVolumeExpansion: true # Allows increasing the PVC's storage request after creation
reclaimPolicy: Retain # Keep the PV and underlying disk when the PVC is deleted (instead of deleting them)
parameters:
  type: pd-balanced # Other Options supported are pd-ssd, pd-standard

# STORAGE CLASS 
# 1. A StorageClass provides a way for administrators 
# to describe the "classes" of storage they offer.
# 2. Here we are offering GCP PD Storage for GKE Cluster

================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/01-persistent-volume-claim.yaml
================================================
# PersistentVolumeClaim for MySQL data, using the custom StorageClass
# defined in 00-storage-class.yaml (dynamic provisioning, pd-balanced disk).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce # Volume can be mounted read-write by a single node
  storageClassName: gke-pd-standard-rwo-sc # Custom class from 00-storage-class.yaml
  resources: 
    requests:
      storage: 4Gi # Comment out at Step-09
      #storage: 8Gi # Uncomment at Step-09 (tests allowVolumeExpansion)
      
# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
# ConfigMap holding the database bootstrap SQL script.
# Mounted into the MySQL container at /docker-entrypoint-initdb.d, so it
# runs automatically when a fresh MySQL data directory is initialized.
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. (We are going to use this in our MySQL Deployment)


================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/03-mysql-deployment.yaml
================================================
# MySQL Deployment backed by a PersistentVolumeClaim for durable data.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1 # Single instance; the backing disk's access mode is ReadWriteOnce
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate # Terminate the old Pod before starting the new one, so the RWO disk can re-attach
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11 # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            # MySQL data directory persisted on the PD-backed volume
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            # Bootstrap SQL from the ConfigMap; runs only when the data directory is fresh
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim # PVC defined in 01-persistent-volume-claim.yaml
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless Service exposing the MySQL Deployment inside the cluster
# via the stable DNS name "mysql" on port 3306.
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql # Routes traffic to Pods labeled app=mysql
  ports: 
    - port: 3306 # Default MySQL port
  clusterIP: None # Headless Service: no cluster VIP is allocated; DNS for "mysql" resolves directly to the backing Pod IP(s)

================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment.
# An init container blocks Pod startup until the MySQL Service answers,
# then the webapp container starts and connects using the DB_* environment variables.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        # Waits until TCP port 3306 on the "mysql" Service is reachable before the app container starts
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always # Always pull the tag on Pod start, even if cached on the node
          ports: 
            - containerPort: 8080 # App listens on 8080; exposed externally via the LoadBalancer Service
          env:
            # Database connection settings consumed by the webapp
            - name: DB_HOSTNAME
              value: "mysql" # Headless Service name of the MySQL Deployment
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb" # Created by the ConfigMap init script on first MySQL start
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11" # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret

================================================
FILE: 23-GKE-PD-Custom-StorageClass/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# Service of type LoadBalancer: provisions an external GCP load balancer
# and exposes the webapp on port 80, forwarding to the container's port 8080.
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp # Routes to the webapp Pods
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 24-GKE-PD-preexisting-PD/README.md
================================================
---
title: GKE Persistent Disks Preexisting PD
description: Use Google Disks Preexisting PD for Kubernetes Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster. 


## Step-01: Introduction
- Use the **pre-existing Persistent Disk** created in previous demo.
- As part of this demo, we are going to provision the **Persistent Volume (PV)** manually. We call this as Static Provisioning. 


## Step-02: List Kubernetes Storage Classes in GKE Cluster
```t
# List Storage Classes
kubectl get sc
```

## Step-03: 00-persistent-volume.yaml
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: preexisting-pd
spec:
  storageClassName: standard-rwo
  capacity:
    storage: 8Gi
  accessModes:
    - ReadWriteOnce
  claimRef:
    namespace: default
    name: mysql-pv-claim
  gcePersistentDisk:
    pdName: preexisting-pd
    fsType: ext4
```

## Step-04: 01-persistent-volume-claim.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources: 
    requests:
      storage: 8Gi
```

## Step-05: Other Kubernetes YAML Manifests
- No changes to other Kubernetes YAML Manifests
- They are same as previous section
- 02-UserManagement-ConfigMap.yaml
- 03-mysql-deployment.yaml
- 04-mysql-clusterip-service.yaml
- 05-UserMgmtWebApp-Deployment.yaml
- 06-UserMgmtWebApp-LoadBalancer-Service.yaml

## Step-06: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List Storage Class
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```

## Step-07: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `8GB` Persistent Disk
- **Observation:** You should see the disk type **In Use By** updated and bound to **gke-standard-cluster-1-default-pool-db7b638f-j5lk**



## Step-08: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

Observation:
1. You should see admin102 already present.
2. This is because in the previous demo we already created admin102, and we have mounted that same data disk here using the "Static Provisioning PV" concept.
```

## Step-09: Clean-Up
```t
# Delete Kubernetes Objects
kubectl delete -f kube-manifests/

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# Delete Persistent Disk: preexisting-pd
1. "preexisting-pd" will not get deleted automatically
2. We should manually delete it 
3. We should observe that its "In Use By" field is empty (Not associated to anything)
4. Go to Compute Engine -> Disks -> preexisting-pd -> DELETE 
```



================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/00-persistent-volume.yaml
================================================
# Statically provisioned PersistentVolume backed by the pre-existing
# Compute Engine disk "preexisting-pd" (cloned in the previous demo).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: preexisting-pd
spec:
  storageClassName: standard-rwo
  capacity:
    storage: 8Gi # Must match the size of the pre-existing disk
  accessModes:
    - ReadWriteOnce
  claimRef:
    # Pre-binds this PV to the mysql-pv-claim PVC so no other claim can grab it
    namespace: default
    name: mysql-pv-claim
  gcePersistentDisk: # NOTE(review): in-tree GCE PD volume source; assumes the CSI migration handles it on this cluster — confirm
    pdName: preexisting-pd # Name of the existing Compute Engine disk
    fsType: ext4

================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/01-persistent-volume-claim.yaml
================================================
# PersistentVolumeClaim that binds to the statically provisioned PV
# (00-persistent-volume.yaml) via that PV's claimRef — no new disk is
# dynamically provisioned in this demo.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources: 
    requests:
      storage: 8Gi # Matches the pre-existing 8GB disk

# NEED FOR PVC (general background; this demo uses static provisioning instead)
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
# ConfigMap holding the database bootstrap SQL script.
# Mounted into the MySQL container at /docker-entrypoint-initdb.d, so it
# runs automatically when a fresh MySQL data directory is initialized.
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. (We are going to use this in our MySQL Deployment)


================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/03-mysql-deployment.yaml
================================================
# MySQL Deployment whose data volume is the pre-existing persistent disk
# (bound through the statically provisioned PV and the mysql-pv-claim PVC).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1 # Single instance; the backing disk's access mode is ReadWriteOnce
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate # Terminate the old Pod before starting the new one, so the RWO disk can re-attach
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11 # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            # MySQL data directory persisted on the PD-backed volume
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            # Bootstrap SQL from the ConfigMap; runs only when the data directory is fresh
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim # PVC bound to the pre-existing PV
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless Service exposing the MySQL Deployment inside the cluster
# via the stable DNS name "mysql" on port 3306.
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql # Routes traffic to Pods labeled app=mysql
  ports: 
    - port: 3306 # Default MySQL port
  clusterIP: None # Headless Service: no cluster VIP is allocated; DNS for "mysql" resolves directly to the backing Pod IP(s)

================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment.
# An init container blocks Pod startup until the MySQL Service answers,
# then the webapp container starts and connects using the DB_* environment variables.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        # Waits until TCP port 3306 on the "mysql" Service is reachable before the app container starts
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always # Always pull the tag on Pod start, even if cached on the node
          ports: 
            - containerPort: 8080 # App listens on 8080; exposed externally via the LoadBalancer Service
          env:
            # Database connection settings consumed by the webapp
            - name: DB_HOSTNAME
              value: "mysql" # Headless Service name of the MySQL Deployment
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb" # Created by the ConfigMap init script on first MySQL start
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11" # NOTE(review): plaintext credential; consider moving to a Kubernetes Secret

================================================
FILE: 24-GKE-PD-preexisting-PD/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 25-GKE-PD-Regional-PD/README.md
================================================
---
title: GKE Persistent Disks - Use Regional PD
description: Use Google Disks Regional PD for Kubernetes Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster.


## Step-01: Introduction
- Use Regional Persistent Disks

## Step-02: List Kubernetes Storage Classes in GKE Cluster
```t
# List Storage Classes
kubectl get sc
```

## Step-03: 00-storage-class.yaml
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: regionalpd-storageclass
provisioner: pd.csi.storage.gke.io
parameters:
  #type: pd-standard # Note: To use regional persistent disks of type pd-standard, set the PersistentVolumeClaim.storage attribute to 200Gi or higher. If you need a smaller persistent disk, use pd-ssd instead of pd-standard.
  type: pd-ssd 
  replication-type: regional-pd
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: topology.gke.io/zone
    values:
    - us-central1-c
    - us-central1-b

## Important Note - Regional PD 
# If using a regional cluster, you can leave allowedTopologies unspecified. If you do this, when you create a Pod that consumes a PersistentVolumeClaim which uses this StorageClass a regional persistent disk is provisioned with two zones. One zone is the same as the zone that the Pod is scheduled in. The other zone is randomly picked from the zones available to the cluster.
# When using a zonal cluster, allowedTopologies must be set.    

# STORAGE CLASS 
# 1. A StorageClass provides a way for administrators 
# to describe the "classes" of storage they offer.
# 2. Here we are offering GCP PD Storage for GKE Cluster
```

## Step-04: 01-persistent-volume-claim.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: regionalpd-storageclass
  resources: 
    requests:
      storage: 4Gi
```

## Step-05: Other Kubernetes YAML Manifests
- No changes to other Kubernetes YAML Manifests
- They are same as previous section
- 02-UserManagement-ConfigMap.yaml
- 03-mysql-deployment.yaml
- 04-mysql-clusterip-service.yaml
- 05-UserMgmtWebApp-Deployment.yaml
- 06-UserMgmtWebApp-LoadBalancer-Service.yaml


## Step-06: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List Storage Class
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```

## Step-07: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk
- **Observation:** Review the below items
  - **Zones:** us-central1-b, us-central1-c
  - **Type:** Regional SSD persistent disk
  - **In use by:** gke-standard-cluster-1-default-pool-db7b638f-j5lk



## Step-08: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101
```

## Step-09: Clean-Up
```t
# Delete Kubernetes Objects
kubectl delete -f kube-manifests/

# Verify if PD is deleted
Go to Compute Engine -> Disks -> Search for 4GB Regional SSD persistent disk.
It should be deleted. 
```



## References 
- [Regional PD](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/regional-pd)

================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/00-storage-class.yaml
================================================
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: regionalpd-storageclass
provisioner: pd.csi.storage.gke.io
parameters:
  #type: pd-standard # Note: To use regional persistent disks of type pd-standard, set the PersistentVolumeClaim.storage attribute to 200Gi or higher. If you need a smaller persistent disk, use pd-ssd instead of pd-standard.
  type: pd-ssd 
  replication-type: regional-pd
volumeBindingMode: WaitForFirstConsumer
#allowedTopologies:  ##-->COMMENTED BECAUSE WE ARE USING REGIONAL GKE CLUSTER
#- matchLabelExpressions:
#  - key: topology.gke.io/zone
#    values:
#    - us-central1-c
#    - us-central1-b

## Important Note - Regional PD 
# 1. If using a regional GKE cluster, you can leave allowedTopologies unspecified. 
# 2. If you do this, when you create a Pod that consumes a 
#PersistentVolumeClaim which uses this StorageClass a regional persistent 
#disk is provisioned with two zones. One zone is the same as the zone 
#that the Pod is scheduled in. The other zone is randomly picked from 
#the zones available to the cluster.
# 3. When using a zonal cluster, allowedTopologies must be set.    

# STORAGE CLASS 
# 1. A StorageClass provides a way for administrators 
# to describe the "classes" of storage they offer.
# 2. Here we are offering GCP PD Storage for GKE Cluster

================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/01-persistent-volume-claim.yaml
================================================
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: regionalpd-storageclass
  resources: 
    requests:
      storage: 4Gi

# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume (we use this approach in our MySQL Deployment)


================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/03-mysql-deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate 
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/04-mysql-clusterip-service.yaml
================================================
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql 
  ports: 
    - port: 3306  
  clusterIP: None # Headless Service: DNS for "mysql" resolves directly to the Pod IP

================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            - name: DB_HOSTNAME
              value: "mysql"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11"            

================================================
FILE: 25-GKE-PD-Regional-PD/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/01-persistent-volume-claim.yaml
================================================
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources: 
    requests:
      storage: 4Gi

# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume (we use this approach in our MySQL Deployment)


================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/03-mysql-deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate 
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/04-mysql-clusterip-service.yaml
================================================
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql 
  ports: 
    - port: 3306  
  clusterIP: None # Headless Service: DNS for "mysql" resolves directly to the Pod IP

================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            - name: DB_HOSTNAME
              value: "mysql"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11"            

================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/02-Volume-Snapshot/01-VolumeSnapshotClass.yaml
================================================
# VolumeSnapshotClass: specifies the CSI driver and deletion policy used by
# VolumeSnapshot objects that reference this class.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: my-snapshotclass
driver: pd.csi.storage.gke.io # Compute Engine persistent disk CSI driver
deletionPolicy: Delete # Delete the underlying snapshot content when the VolumeSnapshot is deleted
#parameters: 
#  storage-locations: us-east2

# Optional Note: 
# To use a custom storage location, add a storage-locations parameter to the snapshot class. 
# To use this parameter, your clusters must use version 1.21 or later.




================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/02-Volume-Snapshot/02-VolumeSnapshot.yaml
================================================
# VolumeSnapshot: takes a point-in-time snapshot of the disk backing the
# mysql-pv-claim PVC, using the my-snapshotclass VolumeSnapshotClass.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: my-snapshot1
spec:
  volumeSnapshotClassName: my-snapshotclass # Defined in 01-VolumeSnapshotClass.yaml
  source:
    persistentVolumeClaimName: mysql-pv-claim # PVC whose backing disk is snapshotted

================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/03-Volume-Restore/01-restore-pvc.yaml
================================================
# PVC restored from the VolumeSnapshot "my-snapshot1": a new persistent disk
# is provisioned and pre-populated with the snapshot's data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-restore
spec:
  dataSource: # Snapshot to restore from
    name: my-snapshot1
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  storageClassName: standard-rwo # Same StorageClass as the source PVC (mysql-pv-claim)
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi # Must be at least the size of the source PVC (4Gi)

================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/03-Volume-Restore/02-mysql-deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate 
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            #claimName: mysql-pv-claim
            claimName: pvc-restore
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 26-GKE-PD-Volume-Snapshots-and-Restore/README.md
================================================
---
title: GKE Persistent Disks - Volume Snapshots and Restore
description: Use Google Disks Volume Snapshots and Restore Concepts applied for Kubernetes Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster.

## Step-01: Introduction
1. Deploy UMS WebApp with `01-kube-manifests`
2. Create new User (admin102, admin103)
3. Create Volume Snapshot Kubernetes Objects and Deploy them
4. Delete User (admin102, admin103)
5. Deploy PVC Restore `03-Volume-Restore`
6. Verify that after the restore, the 2 users we deleted are present again in our UMS App
7. Clean Up (kubectl delete -R -f <Folder>)

## Step-02:  Kubernetes YAML Manifests
- **Project Folder:** 01-kube-manifests
- No changes to Kubernetes YAML Manifests, same as Section `21-GKE-PD-existing-SC-standard-rwo`
- 01-persistent-volume-claim.yaml
- 02-UserManagement-ConfigMap.yaml
- 03-mysql-deployment.yaml
- 04-mysql-clusterip-service.yaml
- 05-UserMgmtWebApp-Deployment.yaml
- 06-UserMgmtWebApp-LoadBalancer-Service.yaml

## Step-03: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f 01-kube-manifests/

# List Storage Class
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```

## Step-04: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk
- **Observation:** Review the below items
  - **Zones:** us-central1-c
  - **Type:** Balanced persistent disk
  - **In use by:** gke-standard-cluster-1-default-pool-db7b638f-j5lk

## Step-05: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

# Create New User admin102
Username: admin102
Password: password102
First Name: fname102
Last Name: lname102
Email Address: admin102@stacksimplify.com
Social Security Number: ssn102

# Create New User admin103
Username: admin103
Password: password103
First Name: fname103
Last Name: lname103
Email Address: admin103@stacksimplify.com
Social Security Number: ssn103
```

## Step-06: 02-Volume-Snapshot: Create Volume Snapshots
- **Project Folder:** 02-Volume-Snapshot
### Step-06-01: 01-VolumeSnapshotClass.yaml
```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: my-snapshotclass
driver: pd.csi.storage.gke.io
deletionPolicy: Delete
#parameters: 
#  storage-locations: us-east2

# Optional Note: 
# To use a custom storage location, add a storage-locations parameter to the snapshot class. 
# To use this parameter, your clusters must use version 1.21 or later.
```
### Step-06-02: 02-VolumeSnapshot.yaml
```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: my-snapshot1
spec:
  volumeSnapshotClassName: my-snapshotclass
  source:
    persistentVolumeClaimName: mysql-pv-claim
```
### Step-06-03: Deploy Volume Snapshot Kubernetes Manifests
```t
# Deploy Volume Snapshot Kubernetes Manifests
kubectl apply -f 02-Volume-Snapshot/

# List VolumeSnapshotClass
kubectl get volumesnapshotclass

# Describe VolumeSnapshotClass
kubectl describe volumesnapshotclass my-snapshotclass

# List VolumeSnapshot
kubectl get volumesnapshot

# Describe VolumeSnapshot
kubectl describe volumesnapshot my-snapshot1

# Verify the Snapshots
Go to Compute Engine -> Storage -> Snapshots
Observation:
1. You should find the new snapshot created
2. Review the "Creation Time"
3. Review the "Disk Size: 4GB"
```

## Step-07: Delete users admin102, admin103
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

# Delete Users
admin102
admin103
```


## Step-08: 03-Volume-Restore: Create Volume Restore
- **Project Folder:** 03-Volume-Restore
### Step-08-01: 01-restore-pvc.yaml
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-restore
spec:
  dataSource:
    name: my-snapshot1
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  storageClassName: standard-rwo
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi
```
### Step-08-02: 02-mysql-deployment.yaml
- Update Claim Name from `claimName: mysql-pv-claim` to `claimName: pvc-restore` 
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate 
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                        
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            #claimName: mysql-pv-claim
            claimName: pvc-restore
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script
```
### Step-08-03: Deploy Volume Restore Kubernetes Manifests
```t
# Deploy Volume Restore Kubernetes Manifests
kubectl apply -f 03-Volume-Restore/

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List Pods
kubectl get pods

# Restart Deployments (Optional - If ERRORS)
kubectl rollout restart deployment mysql
kubectl rollout restart deployment usermgmt-webapp

# Review Persistent Disk
1. Go to Compute Engine -> Storage -> Disks
2. You should find a new "Balanced persistent disk" created as part of new PVC "pvc-restore"
3. To get the exact Disk name for "pvc-restore" PVC run command "kubectl get pvc"


# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101
Observation:
1. You should find admin102, admin103 present
2. That proves, we have restored the MySQL Data using VolumeSnapshots and PVC
```

## Step-09: Clean-Up
```t
# Delete All (Disks, Snapshots)
kubectl delete -f 01-kube-manifests -f 02-Volume-Snapshot -f 03-Volume-Restore

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List VolumeSnapshotClass
kubectl get volumesnapshotclass

# List VolumeSnapshot
kubectl get volumesnapshot

# Verify Persistent Disks
1. Go to Compute Engine -> Storage -> Disks -> REFRESH
2. The two disks created as part of this demo should be deleted

# Verify Disk Snapshots
1. Go to Compute Engine -> Storage -> Snapshots -> REFRESH
2. The snapshot we created as part of this demo should no longer be present.
```




================================================
FILE: 27-GKE-PD-Volume-Clone/01-kube-manifests/01-persistent-volume-claim.yaml
================================================
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources: 
    requests:
      storage: 4Gi

# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 27-GKE-PD-Volume-Clone/01-kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume (we use this approach in our MySQL Deployment)


================================================
FILE: 27-GKE-PD-Volume-Clone/01-kube-manifests/03-mysql-deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate 
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 27-GKE-PD-Volume-Clone/01-kube-manifests/04-mysql-clusterip-service.yaml
================================================
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql 
  ports: 
    - port: 3306  
  clusterIP: None # Headless Service: DNS for "mysql" resolves directly to the Pod IP

================================================
FILE: 27-GKE-PD-Volume-Clone/01-kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            - name: DB_HOSTNAME
              value: "mysql"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              value: "dbpassword11"            

================================================
FILE: 27-GKE-PD-Volume-Clone/01-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# LoadBalancer Service exposing the web application externally on port 80,
# forwarding to the container's port 8080.
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/01-podpvc-clone.yaml
================================================
# PersistentVolumeClaim that CLONES an existing PVC via the CSI volume-cloning
# feature (dataSource of kind PersistentVolumeClaim).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: podpvc-clone
spec:
  dataSource:
    name: mysql-pv-claim # the name of the source PersistentVolumeClaim that you created as part of UMS Web App
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo  # must match the StorageClass of the source PersistentVolumeClaim
  resources:
    requests:
      storage: 4Gi # the amount of storage to request, which must be at least the size of the source PersistentVolumeClaim

================================================
FILE: 27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
# ConfigMap holding the SQL init script that the MySQL container runs on first
# start (mounted under /docker-entrypoint-initdb.d by the mysql2 Deployment).
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script2
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. (We are going to use this in our MySQL Deployment)  


================================================
FILE: 27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/03-mysql-deployment.yaml
================================================
# MySQL Deployment that mounts the CLONED volume (podpvc-clone) instead of the
# original PVC, so it starts with a copy of the source database's data.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql2
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql2
  strategy:
    type: Recreate # terminate the old Pod before creating a new one; the ReadWriteOnce disk can only attach to one node
  template: 
    metadata: 
      labels: 
        app: mysql2
    spec: 
      containers:
        - name: mysql2
          image: mysql:8.0
          env:
            # NOTE(review): plaintext root password in the manifest; a Kubernetes Secret is preferred.
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            #claimName: mysql-pv-claim
            claimName: podpvc-clone # use the cloned PVC rather than the original mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script2



================================================
FILE: 27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless ClusterIP Service for the mysql2 Deployment (cloned-volume variant).
apiVersion: v1
kind: Service
metadata: 
  name: mysql2
spec:
  selector:
    app: mysql2 
  ports: 
    - port: 3306  
  clusterIP: None # Headless Service: DNS resolves directly to the backing Pod IP (no virtual ClusterIP)

================================================
FILE: 27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment (cloned-volume variant).
# Waits for the "mysql2" Service to accept connections before starting the app.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp2
  labels:
    app: usermgmt-webapp2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp2
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp2
    spec:
      initContainers:
        # Block app startup until the mysql2 Service accepts TCP connections on 3306.
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql2 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp2
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            # DB connection settings consumed by the web application.
            - name: DB_HOSTNAME
              value: "mysql2"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            # NOTE(review): plaintext password in the manifest; a Kubernetes Secret is preferred.
            - name: DB_PASSWORD
              value: "dbpassword11"            

================================================
FILE: 27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# LoadBalancer Service exposing the cloned-volume web application externally
# on port 80, forwarding to the container's port 8080.
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp2-lb-service
  labels: 
    app: usermgmt-webapp2
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp2
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/01-persistent-volume-claim.yaml
================================================
# PersistentVolumeClaim for the MySQL data volume; dynamically provisions a
# Compute Engine Persistent Disk through the standard-rwo StorageClass.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec: 
  accessModes:
    - ReadWriteOnce # disk can be mounted read-write by a single node at a time
  storageClassName: standard-rwo # pre-existing GKE StorageClass (PD CSI driver)
  resources: 
    requests:
      storage: 4Gi
 
# NEED FOR PVC
# 1. Dynamic volume provisioning allows storage volumes to be created 
# on-demand. 

# 2. Without dynamic provisioning, cluster administrators have to manually 
# make calls to their cloud or storage provider to create new storage 
# volumes, and then create PersistentVolume objects to represent them in k8s

# 3. The dynamic provisioning feature eliminates the need for cluster 
# administrators to pre-provision storage. Instead, it automatically 
# provisions storage when it is requested by users.

# 4. PVC: Users request dynamically provisioned storage by including 
# a storage class in their PersistentVolumeClaim



================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
# ConfigMap holding the SQL init script that the MySQL container runs on first
# start (mounted under /docker-entrypoint-initdb.d by the mysql Deployment).
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. (We are going to use this in our MySQL Deployment)  


================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/03-mysql-deployment.yaml
================================================
# MySQL Deployment pinned to specific nodes via nodeSelector (NodeSelectors
# variant); mounts the dynamically-provisioned mysql-pv-claim volume.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate # terminate the old Pod before creating a new one; the ReadWriteOnce disk can only attach to one node
  template: 
    metadata: 
      labels: 
        app: mysql
    spec: 
      nodeSelector:
        nodetype: db # schedule only onto nodes labeled nodetype=db (label applied manually; see README)
      containers:
        - name: mysql
          image: mysql:8.0
          env:
            # NOTE(review): plaintext root password in the manifest; a Kubernetes Secret is preferred.
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script



================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless ClusterIP Service for the MySQL Deployment (NodeSelectors variant).
apiVersion: v1
kind: Service
metadata: 
  name: mysql
spec:
  selector:
    app: mysql 
  ports: 
    - port: 3306  
  clusterIP: None # Headless Service: DNS resolves directly to the backing Pod IP (no virtual ClusterIP)

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment (NodeSelectors variant).
# Waits for the "mysql" Service to accept connections before starting the app.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        # Block app startup until the MySQL Service accepts TCP connections on 3306.
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            # DB connection settings consumed by the web application.
            - name: DB_HOSTNAME
              value: "mysql"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            # NOTE(review): plaintext password in the manifest; a Kubernetes Secret is preferred.
            - name: DB_PASSWORD
              value: "dbpassword11"            

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# LoadBalancer Service exposing the web application externally on port 80,
# forwarding to the container's port 8080 (NodeSelectors variant).
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/01-podpvc-clone.yaml
================================================
# PersistentVolumeClaim that CLONES an existing PVC via the CSI volume-cloning
# feature (dataSource of kind PersistentVolumeClaim) - NodeSelectors variant.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: podpvc-clone
spec:
  dataSource:
    name: mysql-pv-claim # the name of the source PersistentVolumeClaim that you created as part of UMS Web App
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo  # must match the StorageClass of the source PersistentVolumeClaim
  resources:
    requests:
      storage: 4Gi # the amount of storage to request, which must be at least the size of the source PersistentVolumeClaim

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/02-UserManagement-ConfigMap.yaml
================================================
# ConfigMap holding the SQL init script that the MySQL container runs on first
# start (mounted under /docker-entrypoint-initdb.d by the mysql2 Deployment).
apiVersion: v1
kind: ConfigMap
metadata:
  name: usermanagement-dbcreation-script2
data: 
  mysql_usermgmt.sql: |-
    DROP DATABASE IF EXISTS webappdb;
    CREATE DATABASE webappdb; 


# CONFIG MAP
# 1. A ConfigMap is an API object used to store non-confidential data in 
# key-value pairs. 

# 2. Pods can consume ConfigMaps as 
## 2.1: environment variables, 
## 2.2: command-line arguments, 
## 2.3: or as configuration files in a volume. (We are going to use this in our MySQL Deployment)  


================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/03-mysql-deployment.yaml
================================================
# MySQL Deployment that mounts the CLONED volume (podpvc-clone) and is pinned
# to labeled nodes via nodeSelector (NodeSelectors variant).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql2
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql2
  strategy:
    type: Recreate # terminate the old Pod before creating a new one; the ReadWriteOnce disk can only attach to one node
  template: 
    metadata: 
      labels: 
        app: mysql2
    spec: 
      nodeSelector:
        nodetype: db # schedule only onto nodes labeled nodetype=db (label applied manually; see README)
      containers:
        - name: mysql2
          image: mysql:8.0
          env:
            # NOTE(review): plaintext root password in the manifest; a Kubernetes Secret is preferred.
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                            
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            #claimName: mysql-pv-claim
            claimName: podpvc-clone # use the cloned PVC rather than the original mysql-pv-claim
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script2



================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/04-mysql-clusterip-service.yaml
================================================
# Headless ClusterIP Service for the mysql2 Deployment (NodeSelectors variant).
apiVersion: v1
kind: Service
metadata: 
  name: mysql2
spec:
  selector:
    app: mysql2 
  ports: 
    - port: 3306  
  clusterIP: None # Headless Service: DNS resolves directly to the backing Pod IP (no virtual ClusterIP)

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/05-UserMgmtWebApp-Deployment.yaml
================================================
# User Management web application Deployment (cloned-volume, NodeSelectors variant).
# Waits for the "mysql2" Service to accept connections before starting the app.
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp2
  labels:
    app: usermgmt-webapp2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp2
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp2
    spec:
      initContainers:
        # Block app startup until the mysql2 Service accepts TCP connections on 3306.
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql2 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp2
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            # DB connection settings consumed by the web application.
            - name: DB_HOSTNAME
              value: "mysql2"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            # NOTE(review): plaintext password in the manifest; a Kubernetes Secret is preferred.
            - name: DB_PASSWORD
              value: "dbpassword11"            

================================================
FILE: 27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml
================================================
# LoadBalancer Service exposing the cloned-volume web application externally
# on port 80, forwarding to the container's port 8080 (NodeSelectors variant).
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp2-lb-service
  labels: 
    app: usermgmt-webapp2
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp2
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port

================================================
FILE: 27-GKE-PD-Volume-Clone/README.md
================================================
---
title: GKE Persistent Disks - Volume Clone
description: Use Google Disks Volume Clone for GKE Workloads
---


## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --region <REGION> --project <PROJECT>

# Replace Values CLUSTER-NAME, REGION, PROJECT
gcloud container clusters get-credentials standard-cluster-private-1 --region us-central1 --project kdaida123
```
3. Feature: Compute Engine persistent disk CSI Driver
  - Verify the Feature **Compute Engine persistent disk CSI Driver** enabled in GKE Cluster. 
  - This is required for mounting the Google Compute Engine Persistent Disks to Kubernetes Workloads in GKE Cluster.


## Step-01: Introduction
- Understand how to implement cloned Disks in GKE

## Step-02:  Kubernetes YAML Manifests
- **Project Folder:** 01-kube-manifests
- No changes to Kubernetes YAML Manifests, same as Section `21-GKE-PD-existing-SC-standard-rwo`
- 01-persistent-volume-claim.yaml
- 02-UserManagement-ConfigMap.yaml
- 03-mysql-deployment.yaml
- 04-mysql-clusterip-service.yaml
- 05-UserMgmtWebApp-Deployment.yaml
- 06-UserMgmtWebApp-LoadBalancer-Service.yaml

## Step-03: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f 01-kube-manifests/

# List Storage Class
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```

## Step-04: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk
- **Observation:** Review the below items
  - **Zones:** us-central1-c
  - **Type:** Balanced persistent disk
  - **In use by:** gke-standard-cluster-1-default-pool-db7b638f-j5lk

## Step-05: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

# Create New User admin102
Username: admin102
Password: password102
First Name: fname102
Last Name: lname102
Email Address: admin102@stacksimplify.com
Social Security Address: ssn102

# Create New User admin103
Username: admin103
Password: password103
First Name: fname103
Last Name: lname103
Email Address: admin103@stacksimplify.com
Social Security Address: ssn103
```

## Step-06: Volume Clone: 01-podpvc-clone.yaml
- **Project Folder:** 02-Use-Cloned-Volume-kube-manifests
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: podpvc-clone
spec:
  dataSource:
    name: mysql-pv-claim # the name of the source PersistentVolumeClaim that you created as part of UMS Web App
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo  # same as the StorageClass of the source PersistentVolumeClaim.   
  resources:
    requests:
      storage: 4Gi # the amount of storage to request, which must be at least the size of the source PersistentVolumeClaim
```

## Step-07: 03-mysql-deployment.yaml
- **Change-1:** Change the `claimName: mysql-pv-claim` to `claimName: podpvc-clone`
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql2
spec: 
  replicas: 1
  selector:
    matchLabels:
      app: mysql2
  strategy:
    type: Recreate 
  template: 
    metadata: 
      labels: 
        app: mysql2
    spec: 
      containers:
        - name: mysql2
          image: mysql:8.0
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: dbpassword11
          ports:
            - containerPort: 3306
              name: mysql    
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql    
            - name: usermanagement-dbcreation-script
              mountPath: /docker-entrypoint-initdb.d #https://hub.docker.com/_/mysql Refer Initializing a fresh instance                                         
      volumes: 
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            #claimName: mysql-pv-claim
            claimName: podpvc-clone
        - name: usermanagement-dbcreation-script
          configMap:
            name: usermanagement-dbcreation-script2
```

## Step-08:  Kubernetes YAML Manifests
- **Project Folder:** 02-Use-Cloned-Volume-kube-manifests
- Kubernetes YAML Manifests are the same as Section `21-GKE-PD-existing-SC-standard-rwo`, with one change:
- All the resource names and labels are suffixed with 2 (Example: mysql to mysql2, usermgmt-webapp to usermgmt-webapp2)
- 02-UserManagement-ConfigMap.yaml
- 03-mysql-deployment.yaml
- 04-mysql-clusterip-service.yaml
- 05-UserMgmtWebApp-Deployment.yaml
- 06-UserMgmtWebApp-LoadBalancer-Service.yaml

## Step-09: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f 02-Use-Cloned-Volume-kube-manifests/

# List Storage Class
kubectl get sc

# List PVC
kubectl get pvc

# List PV
kubectl get pv

# List ConfigMaps
kubectl get configmap

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp2-6ff7d7d849-7lrg5
```

## Step-10: Verify Persistent Disks
- Go to Compute Engine -> Storage -> Disks
- Search for `4GB` Persistent Disk
- **Observation:** Review the below items
  - **Type:** Balanced persistent disk
  - **In use by:** gke-standard-cluster-1-default-pool-db7b638f-j5lk

## Step-11: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101

Observation:
1. You should see both "admin102" and "admin103" users already present.
2. This is because we have used the cloned disk from "01-kube-manifests"
```

## Step-12: Clean-Up
```t
# Delete Kubernetes Objects
kubectl delete -f 01-kube-manifests -f 02-Use-Cloned-Volume-kube-manifests
```


## Step-13: Reference - Label Nodes for the NodeSelectors variant (03-With-NodeSelectors)
```t
# Reference
https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/

# Get Nodes
kubectl get nodes 

# Show Node Labels
kubectl get nodes --show-labels

# Label Node
kubectl label nodes <your-node-name> nodetype=db
kubectl label nodes gke-standard-cluster-pri-default-pool-4f7ab141-p0gz nodetype=db

# Show Node Labels
kubectl get nodes --show-labels
```


================================================
FILE: 28-GKE-Storage-with-GCP-CloudSQL-Public/README.md
================================================
---
title: GKE Storage with GCP Cloud SQL - MySQL Public Instance
description: Use GCP Cloud SQL MySQL DB for GKE Workloads
---

## Step-00: Pre-requisites
1. Verify if GKE Cluster is created
2. Verify if kubeconfig for kubectl is configured in your local terminal
```t
# Configure kubeconfig for kubectl
gcloud container clusters get-credentials <CLUSTER-NAME> --zone <ZONE> --project <PROJECT>

# Replace Values CLUSTER-NAME, ZONE, PROJECT
gcloud container clusters get-credentials standard-cluster-1 --zone us-central1-c --project kdaida123
```

## Step-01: Introduction
- GKE Private Cluster 
- GCP Cloud SQL with Public IP, with the DB Authorized Network open to the entire internet (0.0.0.0/0)

## Step-02: Create Google Cloud SQL MySQL Instance
- Go to SQL -> Choose MySQL
- **Instance ID:** ums-db-public-instance
- **Password:** KalyanReddy13
- **Database Version:** MYSQL 8.0
- **Choose a configuration to start with:** Development
- **Choose region and zonal availability**
  - **Region:** US-central1(IOWA)
  - **Zonal availability:** Single Zone
  - **Primary Zone:** us-central1-a
- **Customize your instance**
- **Machine Type**
  - **Machine Type:** LightWeight (1 vCPU, 3.75GB)
- **STORAGE**  
  - **Storage Type:** HDD
  - **Storage Capacity:** 10GB 
  - **Enable automatic storage increases:** CHECKED
- **CONNECTIONS**  
  - **Instance IP Assignment:** 
    - **Private IP:** UNCHECKED
    - **Public IP:** CHECKED
  - **Authorized networks**
    - **Name:** All-Internet
    - **Network:**  0.0.0.0/0     
    - Click on **DONE**
- **DATA PROTECTION**
  - **Automatic Backups:** UNCHECKED
  - **Enable Deletion protection:** UNCHECKED
- **Maintenance:** Leave to defaults
- **Flags:** Leave to defaults
- **Labels:** Leave to defaults
- Click on **CREATE INSTANCE**      

## Step-03: Perform Telnet Test from local desktop
```t
# Telnet Test
telnet <MYSQL-DB-PUBLIC-IP> 3306

# Replace Public IP
telnet 35.184.228.151 3306

## SAMPLE OUTPUT
Kalyans-Mac-mini:25-GKE-Storage-with-GCP-Cloud-SQL kalyanreddy$ telnet 35.184.228.151 3306
Trying 35.184.228.151...
Connected to 151.228.184.35.bc.googleusercontent.com.
Escape character is '^]'.
Q
8.0.26-google?h'Sxcr+?nd'h<a(X`z=mysql_native_password2#08S01Got timeout reading communication packetsConnection closed by foreign host.
Kalyans-Mac-mini:25-GKE-Storage-with-GCP-Cloud-SQL kalyanreddy$
```


## Step-04: Create DB Schema webappdb 
- Go to SQL ->  ums-db-public-instance -> Databases -> **CREATE DATABASE**
- **Database Name:** webappdb
- **Character set:** utf8
- **Collation:** Default collation
- Click on **CREATE**


## Step-05: 01-MySQL-externalName-Service.yaml
- Update Cloud SQL MySQL DB `Public IP` in ExternalName Service
```yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql-externalname-service
spec:
  type: ExternalName
  externalName: 35.184.228.151
```

## Step-06: 02-Kubernetes-Secrets.yaml
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: mysql-db-password
type: Opaque
data: 
  db-password: S2FseWFuUmVkZHkxMw==

# Base64 of KalyanReddy13
# https://www.base64encode.org/
# Base64 of KalyanReddy13 is S2FseWFuUmVkZHkxMw==
```

## Step-07: 03-UserMgmtWebApp-Deployment.yaml
```yaml
apiVersion: apps/v1
kind: Deployment 
metadata:
  name: usermgmt-webapp
  labels:
    app: usermgmt-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: usermgmt-webapp
  template:  
    metadata:
      labels: 
        app: usermgmt-webapp
    spec:
      initContainers:
        - name: init-db
          image: busybox:1.31
          command: ['sh', '-c', 'echo -e "Checking for the availability of MySQL Server deployment"; while ! nc -z mysql-externalname-service 3306; do sleep 1; printf "-"; done; echo -e "  >> MySQL DB Server has started";']      
      containers:
        - name: usermgmt-webapp
          image: stacksimplify/kube-usermgmt-webapp:1.0.0-MySQLDB
          imagePullPolicy: Always
          ports: 
            - containerPort: 8080           
          env:
            - name: DB_HOSTNAME
              value: "mysql-externalname-service"            
            - name: DB_PORT
              value: "3306"            
            - name: DB_NAME
              value: "webappdb"            
            - name: DB_USERNAME
              value: "root"            
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-db-password
                  key: db-password   
```

## Step-08: 04-UserMgmtWebApp-LoadBalancer-Service.yaml
```yaml
apiVersion: v1
kind: Service
metadata:
  name: usermgmt-webapp-lb-service
  labels: 
    app: usermgmt-webapp
spec: 
  type: LoadBalancer
  selector: 
    app: usermgmt-webapp
  ports: 
    - port: 80 # Service Port
      targetPort: 8080 # Container Port
```

## Step-09: Deploy kube-manifests
```t
# Deploy Kubernetes Manifests
kubectl apply -f kube-manifests/

# List Deployments
kubectl get deploy

# List Pods
kubectl get pods

# List Services
kubectl get svc

# Verify Pod Logs
kubectl get pods
kubectl logs -f <USERMGMT-POD-NAME>
kubectl logs -f usermgmt-webapp-6ff7d7d849-7lrg5
```


## Step-10: Access Application
```t
# List Services
kubectl get svc

# Access Application
http://<ExternalIP-from-get-service-output>
Username: admin101
Password: password101
```

## Step-11: Connect to MySQL DB (Cloud SQL) from GKE Cluster using kubectl
```t
## Verify from Kubernetes Cluster, we are able to connect to MySQL DB
# Template
kubectl run -it --rm --image=mysql:8.0 --restart=Never mysql-client -- mysql -h <Kubernetes-ExternalName-Service> -u <USER_NAME> -p<PASSWORD>

# MySQL Client 8.0: Replace External Name Service, Username and Password
kubectl run -it --rm --image=mysql:8.0 --restart=Never mysql-client -- mysql -h mysql-externalname-service -u root -pKalyanReddy13

mysql> show schemas;
mysql> use webappdb;
mysql> show tables;
mysql> select * from user;
mysql> exit
```

## Step-12: Create New user admin102 and verify in Cloud SQL MySQL webappdb
```t
# Access Application
http://<ExternalIP-from-g
Download .txt
gitextract_6wj2qqr2/

├── .gitignore
├── 01-Create-GCP-Account/
│   └── README.md
├── 02-Create-GKE-Cluster/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 03-gcloud-cli-install-macos/
│   └── README.md
├── 04-gcloud-cli-install-windowsos/
│   └── README.md
├── 05-Docker-For-Beginners/
│   └── README.md
├── 06-kubectl-imperative-k8s-pods/
│   └── README.md
├── 07-kubectl-declarative-k8s-ReplicaSets/
│   ├── README.md
│   └── replicaset-demo.yml
├── 08-kubectl-imperative-k8s-deployment-CREATE/
│   └── README.md
├── 09-kubectl-imperative-k8s-deployment-UPDATE/
│   └── README.md
├── 10-kubectl-imperative-k8s-deployment-ROLLBACK/
│   └── README.md
├── 11-kubectl-imperative-k8s-deployment-PAUSE-RESUME/
│   └── README.md
├── 12-kubectl-imperative-k8s-services/
│   └── README.md
├── 13-YAML-Basics/
│   ├── README.md
│   ├── sample-file.yml
│   └── yaml-demo.yaml
├── 14-yaml-declarative-k8s-pods/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-pod-definition.yml
│       └── 02-pod-LoadBalancer-service.yml
├── 15-yaml-declarative-k8s-replicasets/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-replicaset-definition.yml
│       └── 02-replicaset-LoadBalancer-servie.yml
├── 16-yaml-declarative-k8s-deployments/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-deployment-definition.yml
│       └── 02-deployment-LoadBalancer-servie.yml
├── 17-yaml-declarative-k8s-services/
│   ├── README.md
│   ├── kube-base-definition.yml
│   └── kube-manifests/
│       ├── 01-backend-deployment.yml
│       ├── 02-backend-clusterip-service.yml
│       ├── 03-frontend-deployment.yml
│       └── 04-frontend-LoadBalancer-service.yml
├── 18-GKE-NodePort-Service/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-nodeport-service.yaml
├── 19-GKE-Headless-Service/
│   ├── 01-kube-manifests/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-clusterip-service.yaml
│   │   └── 03-kubernetes-headless-service.yaml
│   ├── 02-kube-manifests-curl/
│   │   └── 01-curl-pod.yml
│   └── README.md
├── 20-GKE-Private-Cluster/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 21-GKE-PD-existing-SC-standard-rwo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 22-GKE-PD-existing-SC-premium-rwo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 23-GKE-PD-Custom-StorageClass/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-storage-class.yaml
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 24-GKE-PD-preexisting-PD/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-persistent-volume.yaml
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 25-GKE-PD-Regional-PD/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-storage-class.yaml
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 26-GKE-PD-Volume-Snapshots-and-Restore/
│   ├── 01-kube-manifests/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   ├── 02-Volume-Snapshot/
│   │   ├── 01-VolumeSnapshotClass.yaml
│   │   └── 02-VolumeSnapshot.yaml
│   ├── 03-Volume-Restore/
│   │   ├── 01-restore-pvc.yaml
│   │   └── 02-mysql-deployment.yaml
│   └── README.md
├── 27-GKE-PD-Volume-Clone/
│   ├── 01-kube-manifests/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   ├── 02-Use-Cloned-Volume-kube-manifests/
│   │   ├── 01-podpvc-clone.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   ├── 03-With-NodeSelectors/
│   │   ├── 01-kube-manifests/
│   │   │   ├── 01-persistent-volume-claim.yaml
│   │   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   │   ├── 03-mysql-deployment.yaml
│   │   │   ├── 04-mysql-clusterip-service.yaml
│   │   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   │   └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 02-Use-Cloned-Volume-kube-manifests/
│   │       ├── 01-podpvc-clone.yaml
│   │       ├── 02-UserManagement-ConfigMap.yaml
│   │       ├── 03-mysql-deployment.yaml
│   │       ├── 04-mysql-clusterip-service.yaml
│   │       ├── 05-UserMgmtWebApp-Deployment.yaml
│   │       └── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   └── README.md
├── 28-GKE-Storage-with-GCP-CloudSQL-Public/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-MySQL-externalName-Service.yaml
│       ├── 02-Kubernetes-Secrets.yaml
│       ├── 03-UserMgmtWebApp-Deployment.yaml
│       └── 04-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 29-GKE-Storage-with-GCP-CloudSQL-Private/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-MySQL-externalName-Service.yaml
│       ├── 02-Kubernetes-Secrets.yaml
│       ├── 03-UserMgmtWebApp-Deployment.yaml
│       └── 04-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 30-GCP-CloudSQL-Private-NO-ExternalNameService/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Kubernetes-Secrets.yaml
│       ├── 02-UserMgmtWebApp-Deployment.yaml
│       └── 03-UserMgmtWebApp-LoadBalancer-Service.yaml
├── 31-GKE-FileStore-default-StorageClass/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-filestore-pvc.yaml
│       ├── 02-write-to-filestore-pod.yaml
│       ├── 03-myapp1-deployment.yaml
│       └── 04-loadBalancer-service.yaml
├── 32-GKE-FileStore-custom-StorageClass/
│   ├── README.md
│   └── kube-manifests/
│       ├── 00-filestore-storage-class.yaml
│       ├── 01-filestore-pvc.yaml
│       ├── 02-write-to-filestore-pod.yaml
│       ├── 03-myapp1-deployment.yaml
│       └── 04-loadBalancer-service.yaml
├── 33-GKE-FileStore-Backup-and-Restore/
│   ├── 01-myapp1-kube-manifests/
│   │   ├── 01-filestore-pvc.yaml
│   │   ├── 02-write-to-filestore-pod.yaml
│   │   ├── 03-myapp1-deployment.yaml
│   │   └── 04-loadBalancer-service.yaml
│   ├── 02-volume-backup-kube-manifests/
│   │   ├── 01-VolumeSnapshotClass.yaml
│   │   └── 02-VolumeSnapshot.yaml
│   ├── 03-volume-restore-myapp2-kube-manifests/
│   │   ├── 01-filestore-pvc.yaml
│   │   ├── 02-myapp2-deployment.yaml
│   │   └── 03-myapp2-loadBalancer-service.yaml
│   └── README.md
├── 34-GKE-Ingress-Basics/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 02-ingress-basic.yaml
├── 35-GKE-Ingress-Context-Path-Routing/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 04-Ingress-ContextPath-Based-Routing.yaml
├── 36-GKE-Ingress-Custom-Health-Check/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 04-Ingress-Custom-Healthcheck.yaml
├── 37-Google-Cloud-Domains/
│   └── README.md
├── 38-GKE-Ingress-ExternalIP/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 04-Ingress-external-ip.yaml
├── 39-GKE-Ingress-Google-Managed-SSL/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-SSL.yaml
│       └── 05-Managed-Certificate.yaml
├── 40-GKE-Ingress-Google-Managed-SSL-Redirect/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-SSL.yaml
│       ├── 05-Managed-Certificate.yaml
│       └── 06-frontendconfig.yaml
├── 41-GKE-Workload-Identity/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-wid-demo-pod-without-sa.yaml
│       └── 02-wid-demo-pod-with-sa.yaml
├── 42-GKE-ExternalDNS-Install/
│   └── README.md
├── 43-GKE-ExternalDNS-Ingress-Demo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App3-Deployment-and-NodePortService.yaml
│       └── 02-ingress-external-dns.yaml
├── 44-GKE-ExternalDNS-Service-Demo/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 45-GKE-Ingress-NameBasedVhost-Routing/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-NameBasedVHost-Routing.yaml
│       ├── 05-Managed-Certificate.yaml
│       └── 06-frontendconfig.yaml
├── 46-GKE-Ingress-SSL-Policy/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-NameBasedVHost-Routing.yaml
│       ├── 05-Managed-Certificate.yaml
│       └── 06-frontendconfig.yaml
├── 47-GKE-Ingress-with-Identity-Aware-Proxy/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-Ingress-NameBasedVHost-Routing.yaml
│       ├── 05-Managed-Certificate.yaml
│       ├── 06-frontendconfig.yaml
│       └── 07-backendconfig.yaml
├── 48-GKE-Ingress-SelfSigned-SSL/
│   ├── README.md
│   ├── SSL-SelfSigned-Certs/
│   │   ├── app1-ingress.crt
│   │   ├── app1-ingress.csr
│   │   ├── app1-ingress.key
│   │   ├── app2-ingress.crt
│   │   ├── app2-ingress.csr
│   │   ├── app2-ingress.key
│   │   ├── app3-ingress.crt
│   │   ├── app3-ingress.csr
│   │   └── app3-ingress.key
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-ingress-self-signed-ssl.yaml
│       └── 05-frontendconfig.yaml
├── 49-GKE-Ingress-Preshared-SSL/
│   ├── README.md
│   ├── SSL-SelfSigned-Certs/
│   │   ├── app1-ingress.crt
│   │   ├── app1-ingress.csr
│   │   ├── app1-ingress.key
│   │   ├── app2-ingress.crt
│   │   ├── app2-ingress.csr
│   │   ├── app2-ingress.key
│   │   ├── app3-ingress.crt
│   │   ├── app3-ingress.csr
│   │   └── app3-ingress.key
│   └── kube-manifests/
│       ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│       ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│       ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│       ├── 04-ingress-preshared-ssl.yaml
│       └── 05-frontendconfig.yaml
├── 50-GKE-Ingress-Cloud-CDN/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-NodePort-service.yaml
│       ├── 03-ingress.yaml
│       └── 04-backendconfig.yaml
├── 51-GKE-Ingress-ClientIP-Affinity/
│   ├── 01-kube-manifests-with-clientip-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   ├── 02-kube-manifests-without-clientip-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   └── README.md
├── 52-GKE-Ingress-Cookie-Affinity/
│   ├── 01-kube-manifests-with-cookie-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   ├── 02-kube-manifests-without-cookie-affinity/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   ├── 02-kubernetes-NodePort-service.yaml
│   │   ├── 03-ingress.yaml
│   │   └── 04-backendconfig.yaml
│   └── README.md
├── 53-GKE-Ingress-HealthCheck-with-backendConfig/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-NodePort-service.yaml
│       ├── 03-ingress.yaml
│       └── 04-backendconfig.yaml
├── 54-GKE-Ingress-InternalLB/
│   ├── 01-kube-manifests/
│   │   ├── 01-Nginx-App1-Deployment-and-NodePortService.yaml
│   │   ├── 02-Nginx-App2-Deployment-and-NodePortService.yaml
│   │   ├── 03-Nginx-App3-Deployment-and-NodePortService.yaml
│   │   └── 04-Ingress-internal-lb.yaml
│   ├── 02-kube-manifests-curl/
│   │   └── 01-curl-pod.yml
│   └── README.md
├── 55-GKE-Ingress-Cloud-Armor/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-NodePort-service.yaml
│       ├── 03-ingress.yaml
│       └── 04-backendconfig.yaml
├── 56-GKE-Artifact-Registry/
│   ├── 01-Docker-Image/
│   │   ├── Dockerfile
│   │   └── index.html
│   ├── 02-kube-manifests/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadBalancer-service.yaml
│   └── README.md
├── 57-GKE-Continuous-Integration/
│   ├── 01-SSH-Keys/
│   │   ├── id_gcp_cloud_source
│   │   └── id_gcp_cloud_source.pub
│   ├── 02-Docker-Image/
│   │   ├── Dockerfile
│   │   └── index.html
│   ├── 03-cloudbuild-yaml/
│   │   └── cloudbuild.yaml
│   ├── 04-kube-manifests/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadBalancer-service.yaml
│   └── README.md
├── 58-GKE-Continuous-Delivery-with-CloudBuild/
│   ├── 01-myapp1-k8s-repo/
│   │   └── cloudbuild-delivery.yaml
│   ├── 02-Source-Writer-IAM-Role/
│   │   └── myapp1-k8s-repo-policy.yaml
│   ├── 03-myapp1-app-repo/
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── cloudbuild-trigger-cd.yaml
│   │   ├── cloudbuild.yaml
│   │   ├── index.html
│   │   └── kubernetes.yaml.tpl
│   └── README.md
├── 59-Kubernetes-liveness-probe/
│   ├── 01-liveness-probe-linux-command/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 07-kubernetes-secret.yaml
│   ├── 02-liveness-probe-HTTP-Request/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 07-kubernetes-secret.yaml
│   ├── 03-liveness-probe-TCP-Request/
│   │   ├── 01-persistent-volume-claim.yaml
│   │   ├── 02-UserManagement-ConfigMap.yaml
│   │   ├── 03-mysql-deployment.yaml
│   │   ├── 04-mysql-clusterip-service.yaml
│   │   ├── 05-UserMgmtWebApp-Deployment.yaml
│   │   ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│   │   └── 07-kubernetes-secret.yaml
│   └── README.md
├── 60-Kubernetes-Startup-Probe/
│   ├── README.md
│   └── kube-manifests-startup-probe/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│       └── 07-kubernetes-secret.yaml
├── 61-Kubernetes-Readiness-Probe/
│   ├── README.md
│   └── kube-manifests-readiness-probe/
│       ├── 01-persistent-volume-claim.yaml
│       ├── 02-UserManagement-ConfigMap.yaml
│       ├── 03-mysql-deployment.yaml
│       ├── 04-mysql-clusterip-service.yaml
│       ├── 05-UserMgmtWebApp-Deployment.yaml
│       ├── 06-UserMgmtWebApp-LoadBalancer-Service.yaml
│       └── 07-kubernetes-secret.yaml
├── 62-Kubernetes-Requests-and-Limits/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 63-GKE-Cluster-Autoscaling/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 64-Kubernetes-Namespaces/
│   ├── 01-kube-manifests-imperative/
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadbalancer-service.yaml
│   ├── 02-kube-manifests-declarative/
│   │   ├── 00-kubernetes-namespace.yaml
│   │   ├── 01-kubernetes-deployment.yaml
│   │   └── 02-kubernetes-loadbalancer-service.yaml
│   └── README.md
├── 65-Kubernetes-Namespaces-ResourceQuota/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-namespace.yaml
│       ├── 02-kubernetes-resourcequota.yaml
│       ├── 03-kubernetes-deployment.yaml
│       └── 04-kubernetes-loadbalancer-service.yaml
├── 66-Kubernetes-Namespaces-LimitRange/
│   ├── 01-kube-manifests-LimitRange-defaults/
│   │   ├── 01-kubernetes-namespace.yaml
│   │   ├── 02-kubernetes-resourcequota-limitrange.yaml
│   │   ├── 03-kubernetes-deployment.yaml
│   │   └── 04-kubernetes-loadbalancer-service.yaml
│   ├── 02-kube-manifests-LimitRange-MinMax/
│   │   ├── 01-kubernetes-namespace.yaml
│   │   ├── 02-kubernetes-resourcequota-limitrange.yaml
│   │   ├── 03-kubernetes-deployment.yaml
│   │   └── 04-kubernetes-loadbalancer-service.yaml
│   └── README.md
├── 67-GKE-Horizontal-Pod-Autoscaler/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       ├── 02-kubernetes-cip-service.yaml
│       └── 03-kubernetes-hpa.yaml
├── 68-GKE-AutoPilot-Cluster/
│   ├── README.md
│   └── kube-manifests/
│       ├── 01-kubernetes-deployment.yaml
│       └── 02-kubernetes-loadbalancer-service.yaml
├── 69-Access-To-Multiple-Clusters/
│   └── README.md
├── README.md
├── course-presentation/
│   └── Google-Kubernetes-Engine-GKE-GCP-v3R.pptx
└── git-deploy.sh
Condensed preview — 387 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (635K chars).
[
  {
    "path": ".gitignore",
    "chars": 727,
    "preview": "# Local .terraform directories\n**/.terraform/*\n.DS_Store \n\n# .tfstate files\n*.tfstate\n*.tfstate.*\n\n# Crash log files\ncra"
  },
  {
    "path": "01-Create-GCP-Account/README.md",
    "chars": 503,
    "preview": "---\ntitle: Create GCP Cloud Account\ndescription: Learn to create GCP Cloud Account\n---\n\n## Step-01: Introduction\n- Creat"
  },
  {
    "path": "02-Create-GKE-Cluster/README.md",
    "chars": 7211,
    "preview": "---\ntitle: GCP Google Kubernetes Engine - Create GKE Cluster\ndescription: Learn to create Google Kubernetes Engine GKE C"
  },
  {
    "path": "02-Create-GKE-Cluster/kube-manifests/01-kubernetes-deployment.yaml",
    "chars": 501,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "02-Create-GKE-Cluster/kube-manifests/02-kubernetes-loadbalancer-service.yaml",
    "chars": 242,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-lb-service\nspec:\n  type: LoadBalancer # ClusterIp, # NodePort\n  s"
  },
  {
    "path": "03-gcloud-cli-install-macos/README.md",
    "chars": 4687,
    "preview": "---\ntitle: gcloud cli install on macOS\ndescription: Learn to install gcloud cli on MacOS\n---\n\n## Step-01: Introduction\n-"
  },
  {
    "path": "04-gcloud-cli-install-windowsos/README.md",
    "chars": 4124,
    "preview": "---\ntitle: gcloud cli install on macOS\ndescription: Learn to install gcloud cli on WindowsOS\n---\n\n## Step-01: Introducti"
  },
  {
    "path": "05-Docker-For-Beginners/README.md",
    "chars": 226,
    "preview": "---\ntitle: Docker Fundamentals\ndescription: Learn Docker Fundamentals\n---\n\n## Docker Fundamentals\n- For Docker Fundament"
  },
  {
    "path": "06-kubectl-imperative-k8s-pods/README.md",
    "chars": 5989,
    "preview": "---\ntitle: Kubernetes PODs\ndescription: Learn about Kubernetes Pods\n---\n\n## Step-01: PODs Introduction\n- What is a POD ?"
  },
  {
    "path": "07-kubectl-declarative-k8s-ReplicaSets/README.md",
    "chars": 3998,
    "preview": "---\ntitle: Kubernetes ReplicaSets\ndescription: Learn about Kubernetes ReplicaSets\n---\n\n## Step-01: Introduction to Repli"
  },
  {
    "path": "07-kubectl-declarative-k8s-ReplicaSets/replicaset-demo.yml",
    "chars": 357,
    "preview": "apiVersion: apps/v1\nkind: ReplicaSet\nmetadata:\n  name: my-helloworld-rs\n  labels:\n    app: my-helloworld\nspec:\n  replica"
  },
  {
    "path": "08-kubectl-imperative-k8s-deployment-CREATE/README.md",
    "chars": 2872,
    "preview": "---\ntitle: Kubernetes - Deployment\ndescription: Learn and Implement Kubernetes Deployment\n---\n\n## Kubernetes Deployment "
  },
  {
    "path": "09-kubectl-imperative-k8s-deployment-UPDATE/README.md",
    "chars": 4145,
    "preview": "---\ntitle: Kubernetes - Update Deployment\ndescription: Learn and Implement Kubernetes Update Deployment\n---\n## Step-00: "
  },
  {
    "path": "10-kubectl-imperative-k8s-deployment-ROLLBACK/README.md",
    "chars": 2809,
    "preview": "---\ntitle: Kubernetes - Rollback Deployment\ndescription: Learn and Implement Kubernetes Rollback Deployment\n---\n\n## Step"
  },
  {
    "path": "11-kubectl-imperative-k8s-deployment-PAUSE-RESUME/README.md",
    "chars": 2832,
    "preview": "---\ntitle: Kubernetes - Pause & Resume Deployments\ndescription: Implement Kubernetes - Pause & Resume Deployments\n---\n##"
  },
  {
    "path": "12-kubectl-imperative-k8s-services/README.md",
    "chars": 4313,
    "preview": "---\ntitle: Kubernetes Services\ndescription: Learn about Kubernetes ClusterIP and Load Balancer Services\n---\n## Step-01: "
  },
  {
    "path": "13-YAML-Basics/README.md",
    "chars": 1526,
    "preview": "---\ntitle: YAML Basics for Kubernetes\ndescription: Learn YAML Basics\n---\n\n## Step-01: Comments & Key Value Pairs\n- Space"
  },
  {
    "path": "13-YAML-Basics/sample-file.yml",
    "chars": 667,
    "preview": "# Simple Key value Pairs\nperson: # Dictionary\n  name: kalyan\n  age: 23\n  city: Hyderabd\n  hobbies: # List\n    - cooking\n"
  },
  {
    "path": "13-YAML-Basics/yaml-demo.yaml",
    "chars": 694,
    "preview": "# Simple Key Value Pairs\nperson: # Dictionary\n  name: kalyan\n  age: 23\n  city: Hyderabad\n  hobbies: # List \n    - cookin"
  },
  {
    "path": "14-yaml-declarative-k8s-pods/README.md",
    "chars": 2453,
    "preview": "---\ntitle: Kubernetes Pods with YAML\ndescription: Learn to write and test Kubernetes Pods with YAML\n---\n\n## Step-01: Kub"
  },
  {
    "path": "14-yaml-declarative-k8s-pods/kube-base-definition.yml",
    "chars": 292,
    "preview": "apiVersion: \nkind: \nmetadata:\n\nspec:\n    \n# Types of Kubernetes Objects\n# Pod, ReplicaSet, Deployment, Service and many "
  },
  {
    "path": "14-yaml-declarative-k8s-pods/kube-manifests/01-pod-definition.yml",
    "chars": 282,
    "preview": "apiVersion: v1 # String\nkind: Pod # String\nmetadata: # Dictionary\n  name: myapp-pod\n  labels: # Dictionary\n    app: myap"
  },
  {
    "path": "14-yaml-declarative-k8s-pods/kube-manifests/02-pod-LoadBalancer-service.yml",
    "chars": 293,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: myapp-pod-loadbalancer-service\nspec:\n  type: LoadBalancer\n  # Loadbalance"
  },
  {
    "path": "15-yaml-declarative-k8s-replicasets/README.md",
    "chars": 1927,
    "preview": "---\ntitle: Kubernetes ReplicaSets with YAML\ndescription: Learn to write and test Kubernetes ReplicaSets with YAML\n---\n\n#"
  },
  {
    "path": "15-yaml-declarative-k8s-replicasets/kube-base-definition.yml",
    "chars": 41,
    "preview": "apiVersion:\nkind:\nmetadata:\n  \nspec:\n    "
  },
  {
    "path": "15-yaml-declarative-k8s-replicasets/kube-manifests/01-replicaset-definition.yml",
    "chars": 453,
    "preview": "apiVersion: apps/v1\nkind: ReplicaSet  \nmetadata: # Dictionary\n  name: myapp2-rs\nspec: # Dictionary\n  replicas: 3\n  selec"
  },
  {
    "path": "15-yaml-declarative-k8s-replicasets/kube-manifests/02-replicaset-LoadBalancer-servie.yml",
    "chars": 295,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: replicaset-loadbalancer-service\nspec:\n  type: LoadBalancer\n  # Loadbalanc"
  },
  {
    "path": "16-yaml-declarative-k8s-deployments/README.md",
    "chars": 1116,
    "preview": "---\ntitle: Kubernetes Deployments with YAML\ndescription: Learn to write and test Kubernetes Deployments with YAML\n---\n\n#"
  },
  {
    "path": "16-yaml-declarative-k8s-deployments/kube-base-definition.yml",
    "chars": 41,
    "preview": "apiVersion:\nkind:\nmetadata:\n  \nspec:\n    "
  },
  {
    "path": "16-yaml-declarative-k8s-deployments/kube-manifests/01-deployment-definition.yml",
    "chars": 461,
    "preview": "apiVersion: apps/v1\nkind: Deployment  \nmetadata: # Dictionary\n  name: myapp3-deployment\nspec: # Dictionary\n  replicas: 3"
  },
  {
    "path": "16-yaml-declarative-k8s-deployments/kube-manifests/02-deployment-LoadBalancer-servie.yml",
    "chars": 253,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: deployment-loadbalancer-service\nspec:\n  type: LoadBalancer # ClusterIp, "
  },
  {
    "path": "17-yaml-declarative-k8s-services/README.md",
    "chars": 2757,
    "preview": "---\ntitle: Kubernetes Services with YAML\ndescription: Learn to write and test Kubernetes Services with YAML\n---\n\n## Step"
  },
  {
    "path": "17-yaml-declarative-k8s-services/kube-base-definition.yml",
    "chars": 37,
    "preview": "apiVersion: \nkind: \nmetadata:\n\nspec:\n"
  },
  {
    "path": "17-yaml-declarative-k8s-services/kube-manifests/01-backend-deployment.yml",
    "chars": 465,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: backend-restapp\n  labels:\n    app: backend-restapp\n    tier: back"
  },
  {
    "path": "17-yaml-declarative-k8s-services/kube-manifests/02-backend-clusterip-service.yml",
    "chars": 377,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: my-backend-service ## VERY VERY IMPORTANT - NGINX PROXYPASS needs this na"
  },
  {
    "path": "17-yaml-declarative-k8s-services/kube-manifests/03-frontend-deployment.yml",
    "chars": 473,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: frontend-nginxapp\n  labels:\n    app: frontend-nginxapp\n    tier:"
  },
  {
    "path": "17-yaml-declarative-k8s-services/kube-manifests/04-frontend-LoadBalancer-service.yml",
    "chars": 329,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: frontend-nginxapp-loadbalancer-service\n  labels:\n    app: frontend-nginx"
  },
  {
    "path": "18-GKE-NodePort-Service/README.md",
    "chars": 3376,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE NodePort Service\ndescription: Implement GCP Google Kubernetes Engine GKE Nod"
  },
  {
    "path": "18-GKE-NodePort-Service/kube-manifests/01-kubernetes-deployment.yaml",
    "chars": 467,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "18-GKE-NodePort-Service/kube-manifests/02-kubernetes-nodeport-service.yaml",
    "chars": 348,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-nodeport-service\nspec:\n  type: NodePort # ClusterIP, # NodePort, "
  },
  {
    "path": "19-GKE-Headless-Service/01-kube-manifests/01-kubernetes-deployment.yaml",
    "chars": 549,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 4\n "
  },
  {
    "path": "19-GKE-Headless-Service/01-kube-manifests/02-kubernetes-clusterip-service.yaml",
    "chars": 273,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-cip-service\nspec:\n  type: ClusterIP # ClusterIP, # NodePort, # Lo"
  },
  {
    "path": "19-GKE-Headless-Service/01-kube-manifests/03-kubernetes-headless-service.yaml",
    "chars": 576,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-headless-service\nspec:\n  #type: ClusterIP # ClusterIP, # NodePort"
  },
  {
    "path": "19-GKE-Headless-Service/02-kube-manifests-curl/01-curl-pod.yml",
    "chars": 146,
    "preview": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: curl-pod\nspec:\n  containers:\n  - name: curl\n    image: curlimages/curl \n    c"
  },
  {
    "path": "19-GKE-Headless-Service/README.md",
    "chars": 5096,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Headless Service\ndescription: Implement GCP Google Kubernetes Engine GKE Hea"
  },
  {
    "path": "20-GKE-Private-Cluster/README.md",
    "chars": 8505,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Private Cluster\ndescription: Implement GCP Google Kubernetes Engine GKE Priv"
  },
  {
    "path": "20-GKE-Private-Cluster/kube-manifests/01-kubernetes-deployment.yaml",
    "chars": 513,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "20-GKE-Private-Cluster/kube-manifests/02-kubernetes-loadbalancer-service.yaml",
    "chars": 240,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-lb-service\nspec:\n  type: LoadBalancer # ClusterIp, # NodePort\n  s"
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/README.md",
    "chars": 13958,
    "preview": "---\ntitle: GKE Persistent Disks Existing StorageClass standard-rwo\ndescription: Use existing storageclass standard-rwo i"
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 839,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 741,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/kube-manifests/03-mysql-deployment.yaml",
    "chars": 2721,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1108,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "21-GKE-PD-existing-SC-standard-rwo/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 219,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\nspec: \n  type: LoadBalancer\n  selector: \n    a"
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/README.md",
    "chars": 3508,
    "preview": "---\ntitle: GKE Persistent Disks Existing StorageClass premium-rwo\ndescription: Use existing storageclass premium-rwo in "
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 839,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/kube-manifests/03-mysql-deployment.yaml",
    "chars": 1051,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1108,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "22-GKE-PD-existing-SC-premium-rwo/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/README.md",
    "chars": 6873,
    "preview": "---\ntitle: GKE Persistent Disks Custom StorageClass \ndescription: Use Custom storageclass to provision Google Disks for "
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/00-storage-class.yaml",
    "chars": 479,
    "preview": "apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata: \n  name: gke-pd-standard-rwo-sc\nprovisioner: pd.csi.storage.g"
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 921,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/03-mysql-deployment.yaml",
    "chars": 1051,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1142,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "23-GKE-PD-Custom-StorageClass/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/README.md",
    "chars": 3589,
    "preview": "---\ntitle: GKE Persistent Disks Preexisting PD\ndescription: Use Google Disks Preexisting PD for Kubernetes Workloads\n---"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/00-persistent-volume.yaml",
    "chars": 298,
    "preview": "apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: preexisting-pd\nspec:\n  storageClassName: standard-rwo\n  capacity"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 839,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/03-mysql-deployment.yaml",
    "chars": 1051,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1142,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "24-GKE-PD-preexisting-PD/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "25-GKE-PD-Regional-PD/README.md",
    "chars": 4137,
    "preview": "---\ntitle: GKE Persistent Disks - Use Regional PD\ndescription: Use Google Disks Regional PD for Kubernetes Workloads\n---"
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/00-storage-class.yaml",
    "chars": 1321,
    "preview": "apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: regionalpd-storageclass\nprovisioner: pd.csi.storage.g"
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 850,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/03-mysql-deployment.yaml",
    "chars": 1051,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1142,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "25-GKE-PD-Regional-PD/kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 839,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/03-mysql-deployment.yaml",
    "chars": 1051,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1142,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/01-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/02-Volume-Snapshot/01-VolumeSnapshotClass.yaml",
    "chars": 384,
    "preview": "apiVersion: snapshot.storage.k8s.io/v1\nkind: VolumeSnapshotClass\nmetadata:\n  name: my-snapshotclass\ndriver: pd.csi.stora"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/02-Volume-Snapshot/02-VolumeSnapshot.yaml",
    "chars": 196,
    "preview": "apiVersion: snapshot.storage.k8s.io/v1\nkind: VolumeSnapshot\nmetadata:\n  name: my-snapshot1\nspec:\n  volumeSnapshotClassNa"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/03-Volume-Restore/01-restore-pvc.yaml",
    "chars": 292,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-restore\nspec:\n  dataSource:\n    name: my-snapshot1\n    "
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/03-Volume-Restore/02-mysql-deployment.yaml",
    "chars": 1087,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "26-GKE-PD-Volume-Snapshots-and-Restore/README.md",
    "chars": 7674,
    "preview": "---\ntitle: GKE Persistent Disks - Volume Snapshots and Restore\ndescription: Use Google Disks Volume Snapshots and Restor"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/01-kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 839,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "27-GKE-PD-Volume-Clone/01-kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/01-kube-manifests/03-mysql-deployment.yaml",
    "chars": 1051,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/01-kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/01-kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1142,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/01-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/01-podpvc-clone.yaml",
    "chars": 523,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: podpvc-clone\nspec:\n  dataSource:\n    name: mysql-pv-claim #"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 495,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script2\ndata: \n  mysql_usermgmt.sql: |-\n    D"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/03-mysql-deployment.yaml",
    "chars": 1093,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql2\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      ap"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 180,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql2\nspec:\n  selector:\n    app: mysql2 \n  ports: \n    - port: 3306  \n "
  },
  {
    "path": "27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1149,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp2\n  labels:\n    app: usermgmt-webapp2\nspec:\n  rep"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/02-Use-Cloned-Volume-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 258,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp2-lb-service\n  labels: \n    app: usermgmt-webapp2\nspec: \n "
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/01-persistent-volume-claim.yaml",
    "chars": 839,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mysql-pv-claim\nspec: \n  accessModes:\n    - ReadWriteOnce\n  "
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 494,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script\ndata: \n  mysql_usermgmt.sql: |-\n    DR"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/03-mysql-deployment.yaml",
    "chars": 1092,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      app"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 178,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql\nspec:\n  selector:\n    app: mysql \n  ports: \n    - port: 3306  \n  c"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1142,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/01-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/01-podpvc-clone.yaml",
    "chars": 523,
    "preview": "apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: podpvc-clone\nspec:\n  dataSource:\n    name: mysql-pv-claim #"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/02-UserManagement-ConfigMap.yaml",
    "chars": 495,
    "preview": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: usermanagement-dbcreation-script2\ndata: \n  mysql_usermgmt.sql: |-\n    D"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/03-mysql-deployment.yaml",
    "chars": 1134,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysql2\nspec: \n  replicas: 1\n  selector:\n    matchLabels:\n      ap"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/04-mysql-clusterip-service.yaml",
    "chars": 180,
    "preview": "apiVersion: v1\nkind: Service\nmetadata: \n  name: mysql2\nspec:\n  selector:\n    app: mysql2 \n  ports: \n    - port: 3306  \n "
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/05-UserMgmtWebApp-Deployment.yaml",
    "chars": 1149,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp2\n  labels:\n    app: usermgmt-webapp2\nspec:\n  rep"
  },
  {
    "path": "27-GKE-PD-Volume-Clone/03-With-NodeSelectors/02-Use-Cloned-Volume-kube-manifests/06-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 258,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp2-lb-service\n  labels: \n    app: usermgmt-webapp2\nspec: \n "
  },
  {
    "path": "27-GKE-PD-Volume-Clone/README.md",
    "chars": 6544,
    "preview": "---\ntitle: GKE Persistent Disks - Volume Clone\ndescription: Use Google Disks Volume Clone for GKE Workloads\n---\n\n\n## Ste"
  },
  {
    "path": "28-GKE-Storage-with-GCP-CloudSQL-Public/README.md",
    "chars": 6870,
    "preview": "---\ntitle: GKE Storage with GCP Cloud SQL - MySQL Public Instance\ndescription: Use GCP Cloud SQL MySQL DB for GKE Worklo"
  },
  {
    "path": "28-GKE-Storage-with-GCP-CloudSQL-Public/kube-manifests/01-MySQL-externalName-Service.yaml",
    "chars": 130,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: mysql-externalname-service\nspec:\n  type: ExternalName\n  externalName: 35."
  },
  {
    "path": "28-GKE-Storage-with-GCP-CloudSQL-Public/kube-manifests/02-Kubernetes-Secrets.yaml",
    "chars": 228,
    "preview": "apiVersion: v1\nkind: Secret\nmetadata:\n  name: mysql-db-password\ntype: Opaque\ndata: \n  db-password: S2FseWFuUmVkZHkxMw==\n"
  },
  {
    "path": "28-GKE-Storage-with-GCP-CloudSQL-Public/kube-manifests/03-UserMgmtWebApp-Deployment.yaml",
    "chars": 1271,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "28-GKE-Storage-with-GCP-CloudSQL-Public/kube-manifests/04-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "29-GKE-Storage-with-GCP-CloudSQL-Private/README.md",
    "chars": 7936,
    "preview": "---\ntitle: GKE Storage with GCP Cloud SQL - MySQL Private Instance\ndescription: Use GCP Cloud SQL MySQL DB for GKE Workl"
  },
  {
    "path": "29-GKE-Storage-with-GCP-CloudSQL-Private/kube-manifests/01-MySQL-externalName-Service.yaml",
    "chars": 126,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: mysql-externalname-service\nspec:\n  type: ExternalName\n  externalName: 10."
  },
  {
    "path": "29-GKE-Storage-with-GCP-CloudSQL-Private/kube-manifests/02-Kubernetes-Secrets.yaml",
    "chars": 228,
    "preview": "apiVersion: v1\nkind: Secret\nmetadata:\n  name: mysql-db-password\ntype: Opaque\ndata: \n  db-password: S2FseWFuUmVkZHkxMw==\n"
  },
  {
    "path": "29-GKE-Storage-with-GCP-CloudSQL-Private/kube-manifests/03-UserMgmtWebApp-Deployment.yaml",
    "chars": 1271,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "29-GKE-Storage-with-GCP-CloudSQL-Private/kube-manifests/04-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "30-GCP-CloudSQL-Private-NO-ExternalNameService/README.md",
    "chars": 4353,
    "preview": "---\ntitle: GKE Storage with GCP Cloud SQL - Without ExternalName Service\ndescription: Use GCP Cloud SQL MySQL DB for GKE"
  },
  {
    "path": "30-GCP-CloudSQL-Private-NO-ExternalNameService/kube-manifests/01-Kubernetes-Secrets.yaml",
    "chars": 228,
    "preview": "apiVersion: v1\nkind: Secret\nmetadata:\n  name: mysql-db-password\ntype: Opaque\ndata: \n  db-password: S2FseWFuUmVkZHkxMw==\n"
  },
  {
    "path": "30-GCP-CloudSQL-Private-NO-ExternalNameService/kube-manifests/02-UserMgmtWebApp-Deployment.yaml",
    "chars": 1527,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata:\n  name: usermgmt-webapp\n  labels:\n    app: usermgmt-webapp\nspec:\n  repli"
  },
  {
    "path": "30-GCP-CloudSQL-Private-NO-ExternalNameService/kube-manifests/03-UserMgmtWebApp-LoadBalancer-Service.yaml",
    "chars": 255,
    "preview": "apiVersion: v1\nkind: Service\nmetadata:\n  name: usermgmt-webapp-lb-service\n  labels: \n    app: usermgmt-webapp\nspec: \n  t"
  },
  {
    "path": "31-GKE-FileStore-default-StorageClass/README.md",
    "chars": 5529,
    "preview": "---\ntitle: GKE Storage with GCP File Store - Default StorageClass\ndescription: Use GCP File Store for GKE Workloads with"
  },
  {
    "path": "31-GKE-FileStore-default-StorageClass/kube-manifests/01-filestore-pvc.yaml",
    "chars": 197,
    "preview": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: gke-filestore-pvc\nspec:\n  accessModes:\n  - ReadWriteMany\n  "
  },
  {
    "path": "31-GKE-FileStore-default-StorageClass/kube-manifests/02-write-to-filestore-pod.yaml",
    "chars": 462,
    "preview": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: filestore-writer-app\nspec:\n  containers:\n    - name: app\n      image: centos\n"
  },
  {
    "path": "31-GKE-FileStore-default-StorageClass/kube-manifests/03-myapp1-deployment.yaml",
    "chars": 723,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "31-GKE-FileStore-default-StorageClass/kube-manifests/04-loadBalancer-service.yaml",
    "chars": 240,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-lb-service\nspec:\n  type: LoadBalancer # ClusterIp, # NodePort\n  s"
  },
  {
    "path": "32-GKE-FileStore-custom-StorageClass/README.md",
    "chars": 4269,
    "preview": "---\ntitle: GKE Storage with GCP File Store - Custom StorageClass\ndescription: Use GCP File Store for GKE Workloads with "
  },
  {
    "path": "32-GKE-FileStore-custom-StorageClass/kube-manifests/00-filestore-storage-class.yaml",
    "chars": 478,
    "preview": "apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: filestore-storage-class\nprovisioner: filestore.csi.st"
  },
  {
    "path": "32-GKE-FileStore-custom-StorageClass/kube-manifests/01-filestore-pvc.yaml",
    "chars": 208,
    "preview": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: gke-filestore-pvc\nspec:\n  accessModes:\n  - ReadWriteMany\n  "
  },
  {
    "path": "32-GKE-FileStore-custom-StorageClass/kube-manifests/02-write-to-filestore-pod.yaml",
    "chars": 462,
    "preview": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: filestore-writer-app\nspec:\n  containers:\n    - name: app\n      image: centos\n"
  },
  {
    "path": "32-GKE-FileStore-custom-StorageClass/kube-manifests/03-myapp1-deployment.yaml",
    "chars": 723,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "32-GKE-FileStore-custom-StorageClass/kube-manifests/04-loadBalancer-service.yaml",
    "chars": 240,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-lb-service\nspec:\n  type: LoadBalancer # ClusterIp, # NodePort\n  s"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/01-myapp1-kube-manifests/01-filestore-pvc.yaml",
    "chars": 224,
    "preview": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: gke-filestore-pvc\nspec:\n  accessModes:\n  - ReadWriteMany\n  "
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/01-myapp1-kube-manifests/02-write-to-filestore-pod.yaml",
    "chars": 462,
    "preview": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: filestore-writer-app\nspec:\n  containers:\n    - name: app\n      image: centos\n"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/01-myapp1-kube-manifests/03-myapp1-deployment.yaml",
    "chars": 723,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/01-myapp1-kube-manifests/04-loadBalancer-service.yaml",
    "chars": 356,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-lb-service\nspec:\n  type: LoadBalancer # ClusterIp, # NodePort\n  s"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/02-volume-backup-kube-manifests/01-VolumeSnapshotClass.yaml",
    "chars": 205,
    "preview": "apiVersion: snapshot.storage.k8s.io/v1\nkind: VolumeSnapshotClass\nmetadata:\n  name: csi-gcp-filestore-backup-snap-class\nd"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/02-volume-backup-kube-manifests/02-VolumeSnapshot.yaml",
    "chars": 228,
    "preview": "apiVersion: snapshot.storage.k8s.io/v1\nkind: VolumeSnapshot\nmetadata:\n  name: myapp1-volume-snapshot\nspec:\n  volumeSnaps"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/03-volume-restore-myapp2-kube-manifests/01-filestore-pvc.yaml",
    "chars": 317,
    "preview": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: restored-filestore-pvc\nspec:\n  accessModes:\n  - ReadWriteMa"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/03-volume-restore-myapp2-kube-manifests/02-myapp2-deployment.yaml",
    "chars": 716,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp2-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/03-volume-restore-myapp2-kube-manifests/03-myapp2-loadBalancer-service.yaml",
    "chars": 240,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp2-lb-service\nspec:\n  type: LoadBalancer # ClusterIp, # NodePort\n  s"
  },
  {
    "path": "33-GKE-FileStore-Backup-and-Restore/README.md",
    "chars": 7723,
    "preview": "---\ntitle: GKE Storage with GCP File Store - Backup and Restore\ndescription: Use GCP File Store for GKE Workloads - Impl"
  },
  {
    "path": "34-GKE-Ingress-Basics/README.md",
    "chars": 3646,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress Basics\ndescription: Implement GCP Google Kubernetes Engine GKE Ingre"
  },
  {
    "path": "34-GKE-Ingress-Basics/kube-manifests/01-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 621,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx\nspec:\n  repli"
  },
  {
    "path": "34-GKE-Ingress-Basics/kube-manifests/02-ingress-basic.yaml",
    "chars": 415,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-basics\n  annotations:\n    # If the class annota"
  },
  {
    "path": "35-GKE-Ingress-Context-Path-Routing/README.md",
    "chars": 3794,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress Context Path Routing\ndescription: Implement GCP Google Kubernetes En"
  },
  {
    "path": "35-GKE-Ingress-Context-Path-Routing/kube-manifests/01-Nginx-App1-Deployment-and-NodePortService.yaml",
    "chars": 621,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app1-nginx-deployment\n  labels:\n    app: app1-nginx\nspec:\n  repli"
  },
  {
    "path": "35-GKE-Ingress-Context-Path-Routing/kube-manifests/02-Nginx-App2-Deployment-and-NodePortService.yaml",
    "chars": 627,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app2-nginx-deployment\n  labels:\n    app: app2-nginx \nspec:\n  repl"
  },
  {
    "path": "35-GKE-Ingress-Context-Path-Routing/kube-manifests/03-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 617,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx \nspec:\n  repl"
  },
  {
    "path": "35-GKE-Ingress-Context-Path-Routing/kube-manifests/04-Ingress-ContextPath-Based-Routing.yaml",
    "chars": 993,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-cpr\n  annotations:\n    # External Load Balancer"
  },
  {
    "path": "36-GKE-Ingress-Custom-Health-Check/README.md",
    "chars": 5967,
    "preview": "---\ntitle: GCP Google Kubernetes Engine Ingress Custom Health Check\ndescription: Implement GCP Google Kubernetes Engine "
  },
  {
    "path": "36-GKE-Ingress-Custom-Health-Check/kube-manifests/01-Nginx-App1-Deployment-and-NodePortService.yaml",
    "chars": 987,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app1-nginx-deployment\n  labels:\n    app: app1-nginx\nspec:\n  repli"
  },
  {
    "path": "36-GKE-Ingress-Custom-Health-Check/kube-manifests/02-Nginx-App2-Deployment-and-NodePortService.yaml",
    "chars": 988,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app2-nginx-deployment\n  labels:\n    app: app2-nginx \nspec:\n  repl"
  },
  {
    "path": "36-GKE-Ingress-Custom-Health-Check/kube-manifests/03-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 974,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx \nspec:\n  repl"
  },
  {
    "path": "36-GKE-Ingress-Custom-Health-Check/kube-manifests/04-Ingress-Custom-Healthcheck.yaml",
    "chars": 1001,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-custom-healthcheck\n  annotations:\n    # Externa"
  },
  {
    "path": "37-Google-Cloud-Domains/README.md",
    "chars": 873,
    "preview": "---\ntitle: Google Cloud Domains\ndescription: Register Domain Name using Google Cloud Domains\n---\n\n## Step-01: Introducti"
  },
  {
    "path": "38-GKE-Ingress-ExternalIP/README.md",
    "chars": 4081,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress with External IP\ndescription: Implement GCP Google Kubernetes Engine"
  },
  {
    "path": "38-GKE-Ingress-ExternalIP/kube-manifests/01-Nginx-App1-Deployment-and-NodePortService.yaml",
    "chars": 987,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app1-nginx-deployment\n  labels:\n    app: app1-nginx\nspec:\n  repli"
  },
  {
    "path": "38-GKE-Ingress-ExternalIP/kube-manifests/02-Nginx-App2-Deployment-and-NodePortService.yaml",
    "chars": 988,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app2-nginx-deployment\n  labels:\n    app: app2-nginx \nspec:\n  repl"
  },
  {
    "path": "38-GKE-Ingress-ExternalIP/kube-manifests/03-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 974,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx \nspec:\n  repl"
  },
  {
    "path": "38-GKE-Ingress-ExternalIP/kube-manifests/04-Ingress-external-ip.yaml",
    "chars": 865,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-external-ip\n  annotations:\n    # External Load "
  },
  {
    "path": "39-GKE-Ingress-Google-Managed-SSL/README.md",
    "chars": 4911,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress SSL\ndescription: Implement GCP Google Kubernetes Engine GKE Ingress "
  },
  {
    "path": "39-GKE-Ingress-Google-Managed-SSL/kube-manifests/01-Nginx-App1-Deployment-and-NodePortService.yaml",
    "chars": 987,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app1-nginx-deployment\n  labels:\n    app: app1-nginx\nspec:\n  repli"
  },
  {
    "path": "39-GKE-Ingress-Google-Managed-SSL/kube-manifests/02-Nginx-App2-Deployment-and-NodePortService.yaml",
    "chars": 988,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app2-nginx-deployment\n  labels:\n    app: app2-nginx \nspec:\n  repl"
  },
  {
    "path": "39-GKE-Ingress-Google-Managed-SSL/kube-manifests/03-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 974,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx \nspec:\n  repl"
  },
  {
    "path": "39-GKE-Ingress-Google-Managed-SSL/kube-manifests/04-Ingress-SSL.yaml",
    "chars": 965,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-ssl\n  annotations:\n    # External Load Balancer"
  },
  {
    "path": "39-GKE-Ingress-Google-Managed-SSL/kube-manifests/05-Managed-Certificate.yaml",
    "chars": 151,
    "preview": "apiVersion: networking.gke.io/v1\nkind: ManagedCertificate\nmetadata:\n  name: managed-cert-for-ingress\nspec:\n  domains:\n  "
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/README.md",
    "chars": 4540,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress SSL Redirect\ndescription: Implement GCP Google Kubernetes Engine GKE"
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/kube-manifests/01-Nginx-App1-Deployment-and-NodePortService.yaml",
    "chars": 987,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app1-nginx-deployment\n  labels:\n    app: app1-nginx\nspec:\n  repli"
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/kube-manifests/02-Nginx-App2-Deployment-and-NodePortService.yaml",
    "chars": 988,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app2-nginx-deployment\n  labels:\n    app: app2-nginx \nspec:\n  repl"
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/kube-manifests/03-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 974,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx \nspec:\n  repl"
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/kube-manifests/04-Ingress-SSL.yaml",
    "chars": 1064,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-ssl\n  annotations:\n    # External Load Balancer"
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/kube-manifests/05-Managed-Certificate.yaml",
    "chars": 150,
    "preview": "apiVersion: networking.gke.io/v1\nkind: ManagedCertificate\nmetadata:\n  name: managed-cert-for-ingress\nspec:\n  domains:\n  "
  },
  {
    "path": "40-GKE-Ingress-Google-Managed-SSL-Redirect/kube-manifests/06-frontendconfig.yaml",
    "chars": 175,
    "preview": "apiVersion: networking.gke.io/v1beta1\nkind: FrontendConfig\nmetadata:\n  name: my-frontend-config\nspec:\n  redirectToHttps:"
  },
  {
    "path": "41-GKE-Workload-Identity/README.md",
    "chars": 12844,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Workload Identity\ndescription: Implement GCP Google Kubernetes Engine GKE Wo"
  },
  {
    "path": "41-GKE-Workload-Identity/kube-manifests/01-wid-demo-pod-without-sa.yaml",
    "chars": 298,
    "preview": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: wid-demo-without-sa\n  namespace: wid-kns\nspec:\n  containers:\n  - image: googl"
  },
  {
    "path": "41-GKE-Workload-Identity/kube-manifests/02-wid-demo-pod-with-sa.yaml",
    "chars": 291,
    "preview": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: wid-demo-with-sa\n  namespace: wid-kns\nspec:\n  containers:\n  - image: google/c"
  },
  {
    "path": "42-GKE-ExternalDNS-Install/README.md",
    "chars": 13681,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE External DNS Install\ndescription: Implement GCP Google Kubernetes Engine GKE"
  },
  {
    "path": "43-GKE-ExternalDNS-Ingress-Demo/README.md",
    "chars": 3898,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress with External DNS \ndescription: Implement GCP Google Kubernetes Engi"
  },
  {
    "path": "43-GKE-ExternalDNS-Ingress-Demo/kube-manifests/01-Nginx-App3-Deployment-and-NodePortService.yaml",
    "chars": 935,
    "preview": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: app3-nginx-deployment\n  labels:\n    app: app3-nginx\nspec:\n  repli"
  },
  {
    "path": "43-GKE-ExternalDNS-Ingress-Demo/kube-manifests/02-ingress-external-dns.yaml",
    "chars": 585,
    "preview": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-externaldns-demo\n  annotations:\n    # If the cl"
  },
  {
    "path": "44-GKE-ExternalDNS-Service-Demo/README.md",
    "chars": 3208,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Service with External DNS \ndescription: Implement GCP Google Kubernetes Engi"
  },
  {
    "path": "44-GKE-ExternalDNS-Service-Demo/kube-manifests/01-kubernetes-deployment.yaml",
    "chars": 467,
    "preview": "apiVersion: apps/v1\nkind: Deployment \nmetadata: #Dictionary\n  name: myapp1-deployment\nspec: # Dictionary\n  replicas: 2\n "
  },
  {
    "path": "44-GKE-ExternalDNS-Service-Demo/kube-manifests/02-kubernetes-loadbalancer-service.yaml",
    "chars": 416,
    "preview": "apiVersion: v1\nkind: Service \nmetadata:\n  name: myapp1-lb-service\n  annotations:\n    # External DNS - For creating a Rec"
  },
  {
    "path": "45-GKE-Ingress-NameBasedVhost-Routing/README.md",
    "chars": 5498,
    "preview": "---\ntitle: GCP Google Kubernetes Engine GKE Ingress Namebased Virtual Host Routing\ndescription: Implement GCP Google Kub"
  }
]

// ... and 187 more files (download for full content)

About this extraction

This page contains the full source code of the stacksimplify/google-kubernetes-engine GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 387 files (16.2 MB), totaling approximately 179.2k tokens. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!