Repository: sebgoa/oreilly-kubernetes Branch: master Commit: fb5e4c86f007 Files: 157 Total size: 215.0 KB Directory structure: gitextract_yj86q4zf/ ├── .gitignore ├── AGENDA.md ├── LICENSE ├── README.md ├── history/ │ ├── 03012019/ │ │ └── oreilly.txt │ ├── 09282018/ │ │ └── history.txt │ ├── 11042017/ │ │ └── history.txt │ ├── 14022016/ │ │ ├── history-1.txt │ │ └── history.txt │ ├── 14072017/ │ │ └── 14072017.txt │ ├── 16022016/ │ │ ├── history-1.txt │ │ └── history.txt │ ├── 16052017/ │ │ └── history.txt │ ├── 21022017/ │ │ └── history.txt │ ├── 22082017/ │ │ └── 22082017.txt │ ├── 23052018/ │ │ └── history.txt │ └── 7062018/ │ └── history.txt ├── kusto/ │ ├── base/ │ │ ├── kustomization.yaml │ │ └── pod.yaml │ └── overlays/ │ ├── dev/ │ │ └── kustomization.yaml │ └── prod/ │ └── kustomization.yaml ├── manifests/ │ ├── 01-pod/ │ │ ├── README.md │ │ ├── busybox.yaml │ │ ├── foobar.yaml │ │ ├── lifecycle.yaml │ │ ├── multi.yaml │ │ └── redis.yaml │ ├── 02-quota/ │ │ ├── README.md │ │ ├── quota.yaml │ │ ├── rq.yaml │ │ └── rq.yaml.fmn │ ├── 03-rs/ │ │ ├── README.md │ │ ├── redis-rc.yaml │ │ ├── rs-example.yml │ │ └── rs.yaml │ ├── 04-services/ │ │ ├── README.md │ │ ├── headless.yaml │ │ └── svc.yaml │ ├── 05-ingress-controller/ │ │ ├── README.md │ │ ├── backend.yaml │ │ ├── frontend.yaml │ │ ├── game.yaml │ │ ├── ghost.yaml │ │ ├── ingress.yaml │ │ └── wordpress.yaml │ ├── 06-volumes/ │ │ ├── README.md │ │ ├── cm-vol.yaml │ │ ├── configmap.yaml │ │ ├── foobar.md │ │ ├── hostpath.yaml │ │ ├── mysql.yaml │ │ ├── oreilly/ │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates/ │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ └── service.yaml │ │ │ └── values.yaml │ │ ├── pv.yaml │ │ ├── pvc.yaml │ │ └── volumes.yaml │ ├── 07-crd/ │ │ ├── README.md │ │ ├── bd.yml │ │ ├── database.yml │ │ └── db.yml │ ├── 08-security/ │ │ ├── README.md │ │ ├── nginx.yaml │ │ └── test.yaml │ ├── README.md │ ├── canary/ │ 
│ ├── README.md │ │ ├── blue-deploy.yaml │ │ ├── blue-files/ │ │ │ └── index.html │ │ ├── configmap.sh │ │ ├── red-deploy.yaml │ │ ├── red-files/ │ │ │ └── index.html │ │ ├── redblue-ingress.yaml │ │ └── redblue-svc.yaml │ ├── configmaps/ │ │ ├── README.md │ │ ├── configmap.yaml │ │ ├── foobar.yaml │ │ ├── pod.yaml │ │ └── update-configmap.sh │ ├── init-container/ │ │ └── init.yaml │ ├── logging/ │ │ ├── allinone.yaml │ │ ├── configs.yaml │ │ ├── dashboards.json │ │ ├── dashboards.yaml │ │ ├── grafana.ini │ │ ├── grafana.json │ │ └── grafana2.json │ ├── nodeselector/ │ │ └── pod-to-arch-amd64.yaml │ ├── old/ │ │ └── 1605207/ │ │ ├── configmap.yaml │ │ ├── foobar.yml │ │ ├── game-svc.yml │ │ ├── game.yml │ │ ├── hostpath.yaml │ │ ├── mysql.yml │ │ ├── nb.yml │ │ ├── notebooks.yml │ │ ├── pvc.yaml │ │ └── volumes.yaml │ ├── scheduling/ │ │ ├── README.md │ │ ├── binding.json │ │ ├── foobar.yaml │ │ ├── redis-sched.yaml │ │ ├── redis-selector.yaml │ │ ├── redis.yaml │ │ └── scheduler.py │ ├── security/ │ │ ├── openssl-generate-certs.sh │ │ └── pawn.yaml │ └── wordpress/ │ ├── march13/ │ │ ├── mysql-svc.yaml │ │ ├── mysql.yaml │ │ ├── quota.yaml │ │ ├── wordpress/ │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates/ │ │ │ │ ├── mysql-svc.yaml │ │ │ │ ├── mysql.yaml │ │ │ │ ├── quota.yaml │ │ │ │ ├── wordpress-svc.yaml │ │ │ │ └── wordpress.yaml │ │ │ └── values.yaml │ │ ├── wordpress-ns.yaml │ │ ├── wordpress-svc.yaml │ │ └── wordpress.yaml │ ├── mysql-secret.yaml │ ├── mysql.yaml │ ├── secret.json │ ├── wordpress/ │ │ ├── mysql-svc.yaml │ │ ├── mysql.yaml │ │ ├── wp-svc.yaml │ │ └── wp.yaml │ ├── wordpress-secret.yaml │ ├── wordpress.yaml │ ├── wp-svc.yaml │ └── wp.yaml ├── monitoring/ │ ├── grafana-statefulset.yaml │ ├── grafana-svc.yaml │ ├── monitoring-namespace.yaml │ ├── node-exporter-daemonset.yaml │ ├── node-exporter-svc.yaml │ ├── prometheus-config.yaml │ ├── prometheus-rbac.yaml │ ├── prometheus-statefulset.yaml │ └── prometheus-svc.yaml ├── 
scripts/ │ ├── create_binding.py │ ├── create_cronjob.py │ ├── create_pod.py │ ├── k3d.sh │ ├── k8s.sh │ ├── kk8s.sh │ ├── kopf/ │ │ ├── README.md │ │ ├── crd.yaml │ │ ├── example.py │ │ └── obj.yaml │ └── kubeadminit.sh └── template/ └── jinja-test.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ .kube* .DS_Store* ================================================ FILE: AGENDA.md ================================================ # Schedule The timeframes are only estimates and may vary according to how the class is progressing _Each part should last for 50 minutes and will be followed by a 10 minutes break._ ## DAY 1 ### Part I Kubernetes intro (history, lineage, web resources) Minikube (installation, basic usage, relation to other k8s deployment) ### Part II Using kubectl (interact with your Kubernetes cluster, introduce basic primitives: pods, deployments, replica set, services) API resources and specification (json/yaml manifests) ### Part III Labels (the why and how about labels) Services (how to expose applications to internet, service types, DNS) ## DAY 2 ### Part I Scaling, rolling updates and rollbacks Ingress controllers (another way to expose apps using Ingress resources) ### Part II Volumes (define volumes in Pods) DaemonSets (for admins who want to run system daemons via k8s) ### Part III Third-party resources (and why they're important) Python client (custom controller 101, write a basic controller in Python) ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================   # Kubernetes Training and Cookbook This repository contains instructions and examples for the O'Reilly Live Online Training for [Kubernetes](https://kubernetes.io). Dates are listed in the [O'Reilly Live Online](https://www.safaribooksonline.com/live-training/) training schedule. It also contains [examples and scripts](./cookbook) used in the Kubernetes [Cookbook](http://shop.oreilly.com/product/0636920064947.do). ## Prerequisites In this training we will use [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) to run a local Kubernetes instance. We will access this local Kubernetes with the client called `kubectl`. 
* Install [minikube](https://github.com/kubernetes/minikube/releases) * Install [kubectl](https://kubernetes.io/docs/user-guide/prereqs/) Verify your installation: ``` $ minikube version minikube version: v0.16.0 $ minikube start $ kubectl version Client Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.2", GitCommit:"08e099554f3c31f6e6f07b448ab3ed78d0520507", GitTreeState:"clean", BuildDate:"2017-01-12T04:57:25Z", GoVersion:"go1.7.4", Compiler:"gc", Platform:"darwin/amd64"} Server Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.2", GitCommit:"08e099554f3c31f6e6f07b448ab3ed78d0520507", GitTreeState:"clean", BuildDate:"1970-01-01T00:00:00Z", GoVersion:"go1.7.1", Compiler:"gc", Platform:"linux/amd64"} ``` If you are impatient, you can now start playing with Kubernetes: * Create a deployment with `kubectl run ghost --image=ghost` * Do you see a running _Pod_ : `kubectl get pods` * Check `kubectl --help` — what else can you do? ## Links * Kubernetes [website](https://kubernetes.io) * Official Kubernetes [Documentation](https://kubernetes.io/docs/) * Research paper describing [_Borg_](https://research.google.com/pubs/pub43438.html) * Kubernetes YouTube [channel](https://www.youtube.com/channel/UCZ2bu0qutTOM0tHYa_jkIwg/featured) * Cloud Native Computing Foundation YouTube [channel](https://www.youtube.com/channel/UCvqbFHwN-nwalWPjPUKpvTA/feed) * Instructor *Sebastien Goasguen* is a twenty-year open source veteran. A member of the Apache Software Foundation, he worked on Apache CloudStack and Libcloud for several years before diving into the container world. He is the founder of [Skippbox](http://www.skippbox.com), a Kubernetes startup that develops open source tools for Kubernetes users. An avid blogger, he enjoys spreading the word about new cutting-edge technologies and also trains developers and sysadmins on all things Docker and Kubernetes. Sebastien is the author of the O’Reilly Docker Cookbook and 60 Recipes for Apache CloudStack.
## Code of Conduct Since this is an official O'Reilly Training, we will adhere to the [O'Reilly conferences Code of Conduct](http://www.oreilly.com/conferences/code-of-conduct.html). _"At O'Reilly, we assume that most people are intelligent and well-intended, and we're not inclined to tell people what to do. However, we want every O'Reilly conference to be a safe and productive environment for everyone. To that end, this code of conduct spells out the behavior we support and don't support at conferences."_ ## Trademark Kubernetes is a registered trademark of the [Linux Foundation](https://www.linuxfoundation.org/trademark-usage). ================================================ FILE: history/03012019/oreilly.txt ================================================ 517 kubectl get pods 518 kubectl logs redis-dff85b6f4-4sq4l 519 kubectl exec -ti redis-dff85b6f4-4sq4l -- /bin/sh 520 vi pod.yaml 521 kubectl create -f pod.yaml 522 kubectl get pods 523 more pod.yaml 524 kubectl get pods 525 kubectl delete pods oreilly 526 kubectl delete pods redis-dff85b6f4-4sq4l 527 kubectl get pods 528 kubectl delete pods redis-dff85b6f4-w6k7c 529 kubectl get pods 530 clear 531 ls -l 532 clear 533 kubectl get pods 534 clear 535 kubectl get pods 536 more pod.yaml 537 kubectl create -f pod.yaml 538 kubectl get pods 539 kubectl get pods 540 kubectl delete pods oreilly 541 kubectl get pods 542 clear 543 vi rs.yaml 544 kubectl create -f rs.yaml 545 vi rs.yaml 546 kubectl create -f rs.yaml 547 more rs.yaml 548 kubectl get pods 549 kubectl delete pods oreilly-j78mg 550 kubectl get pods 551 kubectl get pods 552 more rs.yaml 553 more pod.yaml 554 kubectl get pods 555 kubectl get rs 556 kubectl get replicaset 557 more rs.yaml 558 kubectl get pods --show-labels 559 kubectl get pods -l tough=demo 560 git remote -v 561 cd manifests/ 562 ls -l 563 cd 01-pod/ 564 ls -l 565 more redis.yaml 566 cd .. 
567 ls -l 568 cd 03-rs 569 ls -l 570 more rs.yaml 571 clear 572 ls -l 573 clear 574 kubectl get pods 575 kubectl get pods --v=9 576 clear 577 curl localhost:8001 578 curl localhost:8001/api/v1 579 curl localhost:8001/apis/apps/v1 580 kubectl get pods 581 kubectl delete pods oreilly-dbxfr --v=9 582 clear 583 kubectl get pods 584 kubectl get pods oreilly-5m87t -o yaml 585 kubectl get pods oreilly-5m87t -o json 586 kubectl get pods oreilly-5m87t -o yaml 587 clear 588 curl localhost:8001/api/v1/namespaces/default/pods/oreilly-5m87t 589 kubectl get pods 590 curl -XDELETE localhost:8001/api/v1/namespaces/default/pods/oreilly-5m87t 591 kubectl get pods 592 clear 593 kubectl get pods 594 cd .. 595 cd .. 596 ls -l 597 more pod.yaml 598 kubectl create -f pod.yaml 599 kubectl get pods 600 vi pod.yaml 601 kubectl create -f pod.yaml 602 kubectl create namespace foo 603 kubectl create -f pod.yaml --namespace foo 604 kubectl get pods 605 kubectl get pods --all-namespaces 606 curl localhost:8001/api/v1/namespaces/default/pods 607 curl localhost:8001/api/v1/namespaces/foo/pods 608 curl localhost:8001/api/v1/namespaces/foo/pods |jq -r .items[].smetadata.name 609 curl localhost:8001/api/v1/namespaces/foo/pods |jq -r .items[].metadata.name 610 curl -s localhost:8001/api/v1/namespaces/foo/pods |jq -r .items[].metadata.name 611 curl -s localhost:8001/api/v1/namespaces/default/pods |jq -r .items[].metadata.name 612 kubectl get ns 613 kubectl get pods 614 kubectl create quota oreilly --hard=pods=4 615 kubectl get resourcequota 616 kubectl get resourcequota oreilly -o yaml 617 vi pod.yaml 618 kubectl create -f pod.yaml 619 kubectl create -f pod.yaml -n foo 620 clear 621 kubectl get pods 622 kubectl delete pods oreilly 623 more pod.yaml 624 kubectl create -f pod.yaml 625 kubectl get pods 626 kubectl logs foo 627 kubectl get pods 628 vi svc.yaml 629 kubectl get pods --show-labels 630 kubectl labels pods foo video=online 631 kubectl label pods foo video=online 632 kubectl get pods 
--show-labels 633 vi svc.yaml 634 more svc.yaml 635 kubectl create -f svc.yaml 636 kubectl get svc 637 kubectl get services 638 kubectl edit svc foo 639 kubectl get services 640 kubectl get services -w 641 kubectl get pods 642 kubectl logs foo 643 clear 644 kubectl get pods 645 kubectl get svc 646 vi pod.yaml 647 kubectl create -f pod.yaml 648 kubectl get pods 649 kubectl delete pods food 650 kubectl delete pods foo 651 kubectl create -f pod.yaml 652 kubectl get pods 653 kubectl logs nginxredis -c redis 654 kubectl logs nginxredis -c nginx 655 clear 656 kubectl get pods 657 kubectl get svc 658 kubectl get endpoints 659 more svc.yaml 660 kubectl get pods --show-labels 661 kubectl label pod nginxredis video=online 662 kubectl get pods --show-labels 663 kubectl get endpoints 664 kubectl get pods nginxredis -o json | jq -r .status.PodIP 665 kubectl get pods nginxredis -o json | jq -r .status.podIP 666 kubectl run -it --rm busy --image=busybox:1.27 -- /bin/sh 667 kubectl run -it --rm busybox --image=busybox:1.27 -- /bin/sh 668 kubectl get pods oreilly-t9jq8 -o yaml 669 kubectl exec -ti oreilly-t9jq8 -- /bin/sh 670 clear 671 kubectl get svc 672 kubectl delete svc foo 673 kubectl get nodes 674 clear 675 kubectl get pods 676 kubectl get ns 677 kubectl delete ns foo 678 kubectl get ns 679 kubectl get pods 680 clear 681 kubectl get pods 682 kubectl delete pods nginxredis 683 kubectl delete rs oreilly 684 ls -l 685 kubectl get pods 686 kubectl run --help 687 kubectl run nginx --image=nginx --dry-run -o json 688 kubectl run nginx --image=nginx --restart=never --dry-run -o json 689 kubectl run nginx --image=nginx --restart=Never --dry-run -o json 690 kubectl create 691 kubectl create services --help | more 692 kubectl create service 693 kubectl create service clusterip 694 kubectl create service clusterip -h 695 clear 696 kubectl create service 697 kubectl run --rm -it busy --image=busybox:1.29 -- /bin/sh 698 kubectl delete deployments busy 699 kubectl run --rm -it busy 
--image=busybox:1.29 -- /bin/sh 700 kubectl get deployments 701 kubectl delete deployments busybox 702 clear 703 clear 704 kubectl get nodes 705 ls -l 706 more rs.yaml 707 vi rs.yaml 708 kubectl create -f rs.yaml 709 kubectl get rs 710 kubectl get pods 711 kubectl describe pods oreilly-bds8g 712 kubectl describe pods oreilly-bds8g 713 kubectl get pods 714 kubectl logs oreilly-bds8g 715 ls -l 716 clear 717 kubectl get pods 718 kubectl expose --help 719 clear 720 kubectl get rs 721 kubectl expose rs oreilly --port 80 --target-port 2368 722 kubectl get svc 723 kubectl get svc oreilly -o yaml 724 kubectl get pods -l day=second 725 kubectl get endpoints 726 kubectl get pods 727 kubectl run --it --rm debug --image=busybox:1.29 -- /bin/sh 728 kubectl run -it --rm debug --image=busybox:1.29 -- /bin/sh 729 kubectl run -it --rm debug1 --image=busybox:1.27 -- /bin/sh 730 kubectl run -it --rm debug1 --image=busybox:1.27 -- /bin/sh 731 kubectl run -it --rm debug2 --image=busybox:1.27 -- /bin/sh 732 clear 733 kubectl get pods 734 kubectl get svc 735 kubectl edit svc oreilly 736 kubectl get svc -w 737 clear 738 kubectl run nginx --image=nginx 739 kubectl get pods 740 kubectl scale deployment nginx --replicas 4 741 kubectl get pods 742 kubectl get rs 743 kubectl describe rs nginx-65899c769f 744 kubectl get resourcequota 745 kubectl edit resourcequota oreilly 746 kubectl get pods 747 kubectl set image deployment nginx nginx=nginx:1.1111 748 kubectl get pods 749 kubectl describe pods nginx-6f575df45b-dbjhc 750 kubectl get pods 751 kubectl set image deployment nginx nginx=redis 752 kubectl get rs -w 753 kubectl get pods 754 kubectl get rs 755 kubectl rollout history deployment nginx 756 kubectl rollout undo deployment nginx --to-revision 1 757 kubectl rollout history deployment nginx 758 kubectl get rs 759 kubectl get pods 760 kubectl get pods --show-labels 761 kubectl get pods -l run=nginx 762 kubectl get pods -l run=nginx -o json | jq -r .items[].spec.containers[0].image 763 
kubectl rollout history deployment nginx 764 kubectl rollout undo deployment nginx --to-revision 2 765 kubectl get pods -l run=nginx -o json | jq -r .items[].spec.containers[0].image 766 kubectl get pods 767 kubectl rollout history deployment nginx 768 kubectl rollout undo deployment nginx --to-revision 4 769 kubectl get pods 770 kubectl get deployments 771 kubectl get replicasets 772 kubectl get pods 773 kubectl expose deployments nginx --port 80 --type LoadBalancer 774 kubectl get svc 775 kubectl get svc -w 776 kubectl create -f https://raw.githubusercontent.com/kubernetes/examples/master/guestbook/all-in-one/guestbook-all-in-one.yaml 777 kubectl get deployments 778 kubectl get pods 779 kubectl get pods 780 kubectl get svc 781 kubectl delete deployment nginx 782 kubectl delete svc oreilly nginx 783 kubectl delete deployment redis 784 clear 785 kubectl get svc 786 kubectl edit svc frontend 787 kubectl get svc -w 788 kubectl get pods 789 kubectl scale deployments redis-slave --replicas 4 790 kubectl get pods 791 kubectl get pods 792 kubectl exec -ti redis-master-55db5f7567-qthrj -- redis-cli info 793 kubectl scale deployments redis-slave --replicas 1 794 kubectl get pods 795 kubectl exec -ti redis-master-55db5f7567-qthrj -- redis-cli info 796 kubectl exec -ti redis-master-55db5f7567-qthrj -- redis-cli info 797 kubectl exec -ti redis-master-55db5f7567-qthrj -- redis-cli info 798 kubectl exec -ti redis-master-55db5f7567-qthrj -- redis-cli info 799 kubectl exec -ti redis-master-55db5f7567-qthrj -- redis-cli info 800 clear 801 kubectl run mysql --image=mysql:5.5 --env MYSQL_ROOT_PASSWORD=root 802 kubectl expose deployment mysql --port 3306 803 kubectl get pods 804 kubectl run wordpress --image=wordpress --env WORDPRESS_DB_HOST=mysql --env WORDPRESS_DB_PASSWORD=root 805 kubectl expose deployment wordpress --port 80 --type LoadBalancer 806 kubectl get pods 807 kubectl exec -ti mysql-55d65b64bb-x7ts6 -- mysql -uroot -p 808 clear 809 ls -l 810 kubectl run -it debuggg 
--image=busybox:1.27 -- /bin/sh 811 kubectl get pods 812 kubectl get svc 813 clear 814 ls -l 815 clear 816 kubectl get pods 817 kubectl delete rs oreilly 818 kubectl delete deployment debuggg 819 clear 820 kubectl get pods 821 ls -l 822 ls -l 823 clear 824 ls -l 825 vi index.html 826 cat index.html 827 kubectl create configmap www --from-file=index.html 828 kubectl get configmap 829 kubectl get cm 830 kubectl get cm www -o yaml 831 vi w.yaml 832 more w.yaml 833 kubectl create -f w.yaml 834 kubectl get pods 835 kubectl exec -ti www -- ls -l /usr/share/nginx/html 836 kubectl expose pod www --port 80 --type LoadBalancer 837 kubectl label pod www foo=bar 838 kubectl expose pod www --port 80 --type LoadBalancer 839 kubectl get svc -w 840 kubectl logs www 841 kubectl logs -f www 842 clear 843 ls -l 844 more w.yaml 845 ls -l 846 cd manifests/ 847 ls -l 848 cd 06-volumes/ 849 ls -l 850 pwd 851 more configmap.yaml 852 ls -l 853 more cm-vol.yaml 854 ls -l 855 more foobar.md 856 ls -l 857 more volumes.yaml 858 kubectl create -f volumes.yaml 859 kubectl get pods 860 kubectl exec -ti vol -c busy -- ls -l /busy 861 kubectl cp --help 862 kubectl cp volumes.yaml vol:/busy/volumes.yaml -c busy 863 kubectl exec -ti vol -c busy -- ls -l /busy 864 kubectl exec -ti vol -c box -- ls -l /busy 865 kubectl exec -ti vol -c box -- ls -l /box 866 kubectl exec -ti vol -c box -- cat /box/volumes.yaml 867 ls -l 868 kubectl get pods 869 kubectl delete deployments wordpress mysql 870 kubectl delete pods vol www 871 clear 872 kubectl get pods 873 ls -l 874 more pvc.yaml 875 kubectl get persistentvolumeclaim 876 kubectl get persistentvolume 877 kubectl get pvc 878 kubectl get pv 879 kubectl create -f pvc.yaml 880 kubectl get pv 881 kubectl get pvc 882 kubectl get pv pvc-bea040c8-1033-11e9-bbda-42010a8000a0 -o yaml 883 ls -l 884 more mysql.yaml 885 kubectl get pvc 886 kubectl create -f mysql.yaml 887 kubectl get pods 888 kubectl get pods 889 kubectl get pods 890 kubectl get pods 891 kubectl get pods 
-w 892 kubectl exec -ti data -- mysql -uroot -p 893 kubectl get pods 894 kubectl delete pods data 895 kubectl get pods 896 kubectl get pvc 897 ls -l 898 more mysql.yaml 899 ls -l 900 cp hostpath.yaml test.yaml 901 vi test.yaml 902 kubectl create -f test.yaml 903 kubectl get pods 904 kubectl get pods 905 kubectl get pods 906 kubectl exce -ti pvctest -- ls -l /oreilly 907 kubectl exec -ti pvctest -- ls -l /oreilly 908 kubectl exec -ti pvctest -- ls -l /oreilly/oreilly 909 kubectl get pods 910 kubectl delete pods pvctest 911 clear 912 kubectl run foo --image=ghost --dry-run -o yaml 913 kubectl run foo --image=ghost --dry-run -o json 914 kubectl run foo --image=ghost --dry-run -o json > foo.json 915 more foo.json 916 kubectl get pods 917 kubectl run foo --image=ghost --restart=Never --dry-run -o yaml 918 kubectl get pods 919 kubectl expose deployment frontend --port 80 --dry-run -o yaml 920 kubectl create 921 kubectl create quote foo --hard=pod=2 --dry-run -o yaml 922 kubectl create quota foo --hard=pod=2 --dry-run -o yaml 923 clear 924 which helm 925 helm init 926 kubectl get pods -n kube-system 927 clear 928 helm repo list 929 helm search redis 930 helm install stable/redis 931 helm repo update 932 helm install stable/redis 933 helm inspect stable/redis 934 helm install stable/redis-ha 935 helm create oreilly 936 tree oreilly/ 937 cd oreilly/templates/ 938 ls -l 939 more deployment.yaml 940 clear 941 cd .. 942 cd .. 943 ls -l 944 cd .. 945 ls -l 946 cd 07-crd/ 947 ls -l 948 clear 949 kubectl get database 950 more database.yml 951 kubectl create -f database.yml 952 kubectl get database 953 kubectl proxy ?& 954 kubectl proxy & 955 ps -ef |grep proxy 956 curl localhost:8001/apis/ 957 curl localhost:8001/ 958 curl localhost:8001/apis/foo.bar/v1 959 ls -l 960 more db.yml 961 kubectl create -f db.yml 962 kubectl get db 963 kubectl get db my-new-db -o yaml 964 kubectl edit db my-new-db 965 kubectl get db 966 clear 967 ipython 968 cd .. 
969 ls -l 970 cd 05-ingress-controller/ 971 ls -l 972 more frontend.yaml ================================================ FILE: history/09282018/history.txt ================================================ 1 kubectl get pods 2 which git 3 sudo su apt-get install -y git 4 sudo su apt-get install git 5 sudo su 6 kubectl get pods 7 sudo kubectl get pods 8 kubectl get pods 9 clear 10 git clone https://github.com/sebgoa/oreilly-kubernetes.git 11 ls -l 12 cd oreilly-kubernetes/ 13 ls -l 14 cd manifests/ 15 ls -l 16 clear 17 tree 18 clear 19 sudo su 20 tree 21 clear 22 kubectl get pods 23 sudo su 24 kubectl get pods 25 clear 26 lear 27 clear 28 kubectl get nodes 29 cd .. 30 git remote -v 31 cd manifests/ 32 ls -l 33 cd 01-pod/ 34 ls -l 35 more redis.yaml 36 vi pod.yaml 37 more pod.yaml 38 kubectl create -f pod.yaml 39 clear 40 kubectl get pods 41 kubectl get pods --show-labels 42 kubectl label pods oreilly app=nginx 43 kubectl get pods --show-labels 44 kubectl expose pods oreilly --port 80 --type NodePort -o json --dry-run 45 kubectl expose pods oreilly --port 80 --type NodePort -o yaml --dry-run 46 kubectl expose pods oreilly --port 80 --type NodePort -o yaml --dry-run > svc.yaml 47 kubectl create -f svc.yaml 48 kubectl get svc 49 kubectl get endpoints 50 clear 51 kubectl get pods 52 which js 53 which jq 54 sudo su 55 clear 56 which jq 57 kubectl get --help 58 clear 59 kubectl get pods -o json |jq -r 60 kubectl get pods -o json |jq -r .status.podIP 61 kubectl get pods -o json |jq -r .items[].status.podIP 62 kubectl get endpoints 63 clear 64 kubectl run mysql --image=mysql:5.5 --env MYSQL_ROOT_PASSWORD=root 65 kubectl expose deployment mysql --port 3306 66 kubectl run wordpress --image=wordpress --env WORDPRESS_DB_PASSWORD=root --env WORDPRESS_DB_HOST=mysql 67 kubectl expose deployment wordpress --port 3306 --type NodePort 68 kubectl get pods 69 kubectl get svc 70 clear 71 kubectl get svc 72 kubectl get endpoints 73 kubectl get pods --show-labels 74 kubectl exec -it 
mysql-796fbf7-42hhj mysql -uroot -p 75 kubectl exec -it mysql-796fbf7-42hhj -- mysql -uroot -p 76 clear 77 kubectl get pods 78 kubectl logs wordpress-7c78cb8675-w5dbv 79 kubectl get pods 80 kubectl get svc 81 kubectl edit svc wordpress 82 kubectl get svc 83 clear 84 ls -l 85 cd .. 86 ls -l 87 06-volumes/ 88 ls -l 89 cd 06-volumes/ 90 ls -l 91 more volumes.yaml 92 kubectl create secret 93 kubectl create secret generic foo --from-literal=secret=password 94 kubectl get secrets 95 vi file.txt 96 cat file.txt 97 kubectl create cm foo --from-file=file.txt 98 kubectl get cm 99 clear 100 kubectl get secrets 101 kubectl get cm 102 kubectl get cm foo -o yaml 103 vi vol.yaml 104 kubectl create -f vol.yaml 105 clear 106 kubectl get pod 107 kubectl exec -ti foo -- ls -l /tmp 108 kubectl exec -ti foo -- ls -l /tmp/cm 109 kubectl exec -ti foo -- ls -l /tmp/secret 110 kubectl exec -ti foo -- cat /tmp/cm/file.txt 111 kubectl exec -ti foo -- cat /tmp/secret/password 112 kubectl exec -ti foo -- cat /tmp/secret/secret 113 more pod 114 more vol.yaml 115 clear 116 ls -l 117 kubectl get deployments 118 kubectl get svc 119 cd .. 
120 ls -l 121 mkdir rydercup 122 cd rydercup/ 123 ls -l 124 clear 125 kubectl get deployments wordpress --export -o yaml 126 kubectl get deployments wordpress --export -o yaml > wp.yaml 127 kubectl get deployments mysql --export -o yaml > mysql.yaml 128 ls -l 129 kubectl get svc wordpress --export -o yaml > wp-svc.yaml 130 kubectl get svc mysql --export -o yaml > mysql-svc.yaml 131 ls -l 132 more wp-svc.yaml 133 kubectl delete deployments wordpress mysql 134 kubectl delete svc wordpress mysql 135 kubectl get pods 136 kubectl delete pods foo oreilly 137 kubectl get pods 138 clear 139 ls -l 140 kubectl create -f mysql-svc.yaml 141 kubectl create -f mysql.yaml 142 kubectl create -f wp.yaml 143 kubectl create -f wp-svc.yaml 144 kubectl get pods 145 kubectl delete -f wp.yaml 146 kubectl get pods 147 kubectl create -f wp.yaml 148 kubectl get pods 149 vi wp-svc.yaml 150 kubectl replace -f wp-svc.yaml 151 kubectl delete -f wp-svc.yaml 152 kubectl replace -f wp-svc.yaml 153 kubectl create -f wp-svc.yaml 154 ls -l 155 kubectl delete -f mysql-svc.yaml 156 kubectl delete -f mysql.yaml 157 kubectl delete -f wp.yaml 158 kubectl delete -f wp-svc.yaml 159 cd .. 
160 kubectl get pods 161 ls -l 162 kubectl get pods 163 kubectl apply -f ./rydercup/ 164 kubectl get pods 165 vi rydercup/wp.yaml 166 kubectl apply -f ./rydercup/ 167 kubectl get pods --show-labels 168 vi rydercup/wp.yaml 169 kubectl apply -f ./rydercup/ 170 kubectl get pods --show-labels 171 ls -l 172 kubectl edit deployment wp 173 kubectl edit deployment wordpress 174 clear 175 kubectl get pods 176 kubectl delete -f ./rydercup/ 177 kubectl get pods 178 clear 179 kubectl get pods 180 which helm 181 helm version 182 kubectl version 183 helm 184 kubectl get pods -n kube-system 185 helm repo list 186 helm init --client-only 187 helm repo list 188 helm search redis 189 helm search minio 190 helm inspect stable/minio 191 helm inspect values stable/minio 192 helm install stable/minio 193 kubectl get svc 194 kubectl edit svc vociferous-condor-minio 195 kubectl get svc 196 clear 197 ls -l 198 cd rydercup/ 199 ls -l 200 cd .. 201 helm 202 helm create oreilly 203 tree oreilly/ 204 cd oreilly/ 205 ls 206 ls -l 207 rm -rf charts/ 208 vi Chart.yaml 209 vi values.yaml 210 rm values.yaml 211 vi values.yaml 212 cd .. 213 ls -l 214 rm -rf oreilly/ 215 ls -l 216 kubectl get svc 217 kubectl edit svc vociferous-condor-minio 218 ls -l 219 helm 220 helm create oreilly 221 tree oreilly/ 222 cd oreilly/ 223 ls -l 224 rm -rf charts/ 225 vi Chart.yaml 226 ls -l 227 vi values.yaml 228 more values.yaml 229 cd templates/ 230 ls -l 231 rm *.yaml 232 ls -l 233 rm _helpers.tpl 234 rm N 235 rm NOTES.txt 236 ls -l 237 cp ../../rydercup/* 238 ls -l 239 cp ../../rydercup/* . 240 ls -l 241 vi wp.yaml 242 cd .. 243 ls -l 244 cd .. 245 ls -l 246 helm ls 247 helm install ./oreilly/ 248 helm create foo 249 cd foo/templates/ 250 ls -l 251 more deployment.yaml 252 cd .. 253 vi oreilly/templates/wp.yaml 254 helm install ./oreilly/ 255 ls -l 256 cd rydercup/ 257 ls -l 258 cd .. 259 cd oreilly/ 260 ls -l 261 cd templates/ 262 ls -l 263 vi pvc.yaml 264 vi mysql.yaml 265 more pvc.yaml 266 ls -l 267 cd .. 
268 helm ls 269 helm wobbly-cricket 270 helm delete wobbly-cricket 271 helm delete vociferous-condor 272 kubectl get pods 273 clear 274 kubectl get pods 275 clear 276 kubectl get pods 277 clear 278 kubectl get pods 279 ls -l 280 helm install ./oreilly/ 281 kubectl get pods 282 kubectl get pvc 283 kubectl get pv 284 kubectl get svc 285 cat oreilly/templates/pvc.yaml 286 cd .. 287 ls -l 288 wget https://github.com/kubernetes/kompose/releases/download/v1.16.0/kompose-linux-amd64 289 ls -l 290 mv kompose-linux-amd64 kompose 291 chmod 744 kompose 292 kompose 293 ./kompose 294 vi docker-compose.yaml 295 ./kompose convert 296 ls -l 297 vi docker-compose.yaml 298 ./kompose convert 299 ls -l 300 more frontend-deployment.yaml 301 ./kompose up 302 kubectl proxy & 303 ps -ef|grep proxy 304 kill -9 24762 305 ps -ef|grep proxy 306 kubectl proxy --port 8080& 307 ./kompose up 308 kubectl get pods 309 kubectl get svc 310 kubectl get pods 311 ls -l 312 cd scripts/ 313 ls -l 314 more create_pod.py 315 ls -l 316 more create_cronjob.py 317 ls -l 318 cd .. 
319 ls -l 320 cd oreilly-kubernetes/ 321 ls -l 322 rm oreilly-0.1.0.tgz 323 ls -l 324 cd m 325 cd manifests/ 326 ls -l 327 rm -rf rydercup/ 328 rm -rf oreilly/ 329 ls -l 330 cd 07-crd 331 ls -l 332 more database.yml 333 kubectl get databases 334 kubectl create -f database.yml 335 kubectl get databases 336 kubectl apiversions 337 kubectl api-versions 338 curl localhost:8080/foo.bar/v1 339 curl localhost:8080/apis/foo.bar/v1 340 more db.yml 341 kubectl apply -f db.yml 342 kubectl get db 343 kubectl get db my-new-db -o yaml 344 vi db.yml 345 kubectl apply -f db.yml 346 kubectl get databases ================================================ FILE: history/11042017/history.txt ================================================ 7 which gcloud 8 gcloud container clusters list 9 gcloud container clusters create foobar 10 clear 11 gcloud container clusters list 12 kubectl get nodes 13 kubectl config view 14 kubectl config view | more 15 clear 16 kubectl config use-context minikube 17 kubectl get nodes 18 kubectl config use-context gke_skippbox_europe-west1-b_foobar 19 kubectl get nodes 20 clear 25 kubectl get nodes 26 kubectl config use-context minikube 27 kubectl get nodes 28 minikube version 29 minikube dashboard 30 clear 31 kubectl get pods 32 kubectl get deployments 33 kubectl get replicasets 34 kubectl logs redis-3215927958-kqvfb 35 minikube ssh 36 clear 37 kubectl get pods 38 kubectl scale deployments redis --replicas=4 39 kubectl get pods 40 kubectl get pods 41 kubectl delete pods redis-3215927958-tqp1q 42 kubectl get pods 43 kubectl scale deployments redis --replicas=2 44 kubectl scale deployments redis --replicas=0 45 clear 46 kubectl get pods 47 kubectl get deployments 48 kubectl scale deployments redis --replicas=1 49 kubectl get deployments 50 kubectl get pods 51 clear 52 ls -l 53 vi foo.yaml 54 kubectl create -f foo.yaml 55 kubectl get pods 56 more foo.yaml 57 kubectl get redis-3215927958-h243b -o yaml | more 58 kubectl get pods redis-3215927958-h243b -o yaml | 
more 59 clear 60 more foo.yaml 61 kubectl get pods 62 vi foo.yaml 63 kubectl apply -f foo.yaml 64 kubectl get pods 65 kubectl delete pods foobar 66 kubectl apply -f foo.yaml 67 clear 68 kubectl get pods 69 kubectl get pods 70 kubectl get pods 71 kubectl apply -f foo.yaml 72 kubectl get pods 73 clear 74 kubectl get pods 75 kubectl --v=99 get pods | more 76 kubectl --v=99 delete pods foobar | more 77 clear 78 minikube ssh 79 clear 80 kubectl get pods 81 kubectl exec -ti redis-3215927958-h243b -- /bin/bash 82 which redis-cli 83 which brew 84 clear 85 kubectl get namespace 86 kubectl get ns 87 kubectl get pods --all-namespaces 88 more foo.yaml 89 kubectl apply -f foo.yaml 90 kubectl get pods 91 kubectl get pods 92 kubectl create -f foo.yaml 93 clear 94 kubectl create ns oreilly 95 kubectl get ns 96 vi foo.yaml 97 kubectl create -f foo.yaml 98 kubectl get pods 99 kubectl get pods --all-namespaces 100 minikube ssh 101 clear 102 kubectl get pods 103 ls -l 104 cd manifests/ 105 ls -l 106 more rq.yaml 107 clear 108 kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/guestbook/all-in-one/guestbook-all-in-one.yaml 109 kubectl get pods 110 kubectl get deployments 111 kubectl get service 112 clear 113 kubectl get pods 114 ls -l 115 git remote -v 116 more rq.yaml 117 kubectl get pods --namespace oreilly 118 vi quota.yaml 119 kubectl create -f quota.yaml 120 kubectl get resourcequotas 121 kubectl get resourcequotas --namespace oreilly 122 vi test.yaml 123 kubectl get pods --namespace oreilly 124 kubectl create -f test.yaml 125 kubectl edit resourcequotas counts --namespace oreilly 126 kubectl create -f test.yaml 127 kubectl get pods --namespace oreilly 128 kubectl get pods 129 kubectl get svc 130 kubectl edit svc frontend 131 kubectl get svc 132 minikube ip 133 kubectl run ghost --image=ghost 134 kubectl get pods 135 kubectl get deployments 136 kubectl expose deployment ghost --port=2368 --type=NodePort 137 kubectl get svc 138 clear 139 
kubectl get pods 140 kubectl get pods foobar -o yaml 141 clear 142 kubectl get pods 143 kubectl get pods --show-labels 144 kubectl get pods -Lrun 145 kubectl get pods -l run=ghost 146 kubectl scale deployments ghost --replicas=3 147 kubectl get pods -l run=ghost 148 clear 149 kubectl get deployments 150 kubectl get rs 151 kubectl get rs ghost-2663835528 -o yaml 152 kubectl get pods 153 kubectl get pods -l run=ghost 154 kubectl label pods ghost-2663835528-1269w run=ghost-not-working --overwrite 155 kubectl get pods -Lrun 156 clear 157 kubectl get svc 158 kubectl get svc ghost -o yaml 159 kubectl get endpoints 160 kubectl scale deployments ghost --replicas=1 161 kubectl get pods -l run=ghost 162 kubectl get endpoints 163 clear 164 clear 165 kubectl get pods 166 kubectl delete ns oreilly 167 kubectl delete deployments frontend, redis-master, redis-slave 168 kubectl delete deployments frontend redis-master redis-slave 169 kubectl get pods 170 kubectl delete deployment ghost 171 kubectl delete pods foobar 172 kubectl get ns 173 kubectl get pods 174 kubectl delete deployment redis 175 kubectl get pods 176 kubectl get pods 177 kubectl get svc 178 kubectl delete svc frontend ghost redis-master redis-slave ================================================ FILE: history/14022016/history-1.txt ================================================ kubectl get pods 509 kubectl get rc 510 kubectl get rs 516 kubectl create -f redis-rc.yaml 517 kubectl get rc 518 kubectl get rs 519 kubectl delete deployments redis 520 kubectl get rs 521 kubectl get pods 522 kubectl delete pods busybox 523 kubectl delete pods multi 525 kubectl get pods 526 kubectl get rc 527 kubectl get rc redis -o yaml 528 kubectl get rc redis -o yaml | more 531 kubectl get rc 532 kubectl get pods 534 kubectl scale rc redis --replicas=4 535 kubectl get pods 537 kubectl get pods --show-labels 538 kubectl label pods redis-pt732 app=foobar 539 kubectl label pods --overwrite redis-pt732 app=foobar 540 kubectl get pods 
--show-labels 541 kubectl delete rc redis 542 kubectl get pods --show-labels 543 kubectl logs redis-pt732 545 kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/guestbook/all-in-one/guestbook-all-in-one.yaml 546 kubectl get pods 547 kubectl exec -ti redis-master-343230949-lktfz -- redis-cli 549 kubectl get deployments 550 kubectl scale deployments redis-slave --replicas=5 551 kubectl get pods 552 kubectl exec -ti redis-master-343230949-lktfz -- redis-cli info 554 kubectl get pods 555 kubectl get svc 556 kubectl get services 557 kubectl edit svc frontend 558 kubectl describe svc frontend 560 kubectl get svc 561 kubectl create -f busybox.yaml 562 kubectl get pods 563 kubectl exec -ti busybox -- nslookup frontend 564 kubectl exec -ti busybox -- nslookup redis-master 565 kubectl exec -ti busybox -- nslookup redis-slave 567 kubectl get endpoints 568 kubectl get pods 569 kubectl get svc frontend -o yaml 570 kubectl get pods --show-labels 572 kubectl get pods 573 kubectl run ghost --image=ghost 574 kubectl expose deployment ghost --port=2368 --type=NodePort 575 kubectl get pods 576 kubectl get svc 577 kubectl get pods 579 kubectl get svc 580 kubectl get endpoints ================================================ FILE: history/14022016/history.txt ================================================ gcloud container clusters create oreilly 644 clear 645 kubectl get nodes 646 kubectl config use-context minikube 647 kubectl get nodes 648 vi ~/.kube/config 649 kubectl config use-context gke_skippbox_europe-west1-b_oreilly 650 kubectl get nodes 651 kubectl config use-context minikube 652 kubectl get nodes 653 minikube ssh 654 clear 655 minikube status 656 kubectl get nodes 657 minikube dashboard 658 kubectl get pods 659 kubectl get replicasets 660 kubectl get deployments 661 kubectl scale deployments redis --replicas=5 662 kubectl get replicasets 663 kubectl get pods 664 kubectl scale deployments redis --replicas=2 665 kubectl get pods 666 
clear 667 kubectl get pods 668 kubectl exec -ti redis-3133791336-1cgrj -- redis-cli 669 clear 670 kubectl get pods 671 minikube ssh 672 clear 673 kubectl get pods 674 redis-cli 675 clear 676 kubectl get pods 677 kubectl get po 678 kubectl get rs 679 kubectl get deployments 680 kubectl get secrets 681 kubectl get configmaps 682 kubectl get persistentvolumes 683 kubectl get namespaces 684 kubectl get services 685 clear 686 kubectl get pods 687 kubectl delete pods redis-3133791336-1cgrj 688 kubectl get pods 689 kubectl get pods 690 kubectl scale deployment redis --replicas=1 691 kubectl get pods 692 kubectl --v=99 get pods 693 kubectl --v=99 get svc 694 clear 695 minikube ssh 696 clear 697 kubectl get pods 698 kubectl get pods redis-3133791336-c37p0 -o yaml 699 kubectl get pods redis-3133791336-c37p0 -o yaml | more 700 kubectl get pods redis-3133791336-c37p0 -o json |more 701 kubectl get pods redis-3133791336-c37p0 -o json | jq 702 ls -l 703 clear 704 git remote -v 705 ls -l 706 cd manifests/ 707 ls -l 708 clear 709 cat busybox.yaml 710 kubectl create -f busybox.yaml 711 kubectl get pods 712 kubectl get pods 713 kubectl exec -ti busybox 714 kubectl exec -ti busybox /bin/sh 715 clear 716 cat busybox.yaml 717 kubectl get pods 718 cp busybox.yaml multi.yaml 719 vi multi.yaml 720 kubectl create -f multi.yaml 721 clear 722 kubectl get pods 723 kubectl exec -ti multi /bin/sh 724 kubectl exec -ti multi -- redis-cli 725 kubectl exec -ti multi -c redis -- redis-cli 726 kubectl get pods 727 clear 728 kubectl get pods 729 kubectl get pods --all-namespaces 730 kubectl create ns oreilly 731 kubectl get ns 732 vi multi.yaml 733 kubectl create -f multi.yaml 734 kubectl get pods --all-namespaces 735 vi multi.yaml ================================================ FILE: history/14072017/14072017.txt ================================================ 18 minikube stop 19 minikube delete 20 curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.20.0/minikube-darwin-amd64 && 
chmod +x minikube && sudo mv minikube /usr/local/bin/ 21 minikube version 22 minikube start 23 kubectl version 24 which kubectl 25 gcloud components updates 26 gcloud components update 27 which kubectl 28 kubectl version 29 kuebctl get nodes 30 kubectl get nodes 31 kubectl get pods 32 clear 33 gcloud container clusters list 34 gcloud container clusters delete redeye 35 clear 36 clear 37 clear 38 kubectl get pods --all-namespaces 39 clear 40 clear 41 minikube status 42 kubectl get pods 43 kubectl get nodes 44 clear 45 which gcloud 46 gcloud container cluster create foobar 47 gcloud container clusters create foobar 48 gcloud container clusters list 49 kubectl get nodes 50 kubectl config view use-context minikube 51 kubectl config view use-context minikubeclear 52 kubectl config use-context minikubeclear 53 kubectl config use-context minikube 54 kubectl get nodes 55 clear 56 minikube status 57 kubectl get pods 58 kubectl get nodes 59 kubectl run ghost --image=ghost 60 kubectl get pods 61 kubectl get pods 62 kubectl expose deployments ghost --port=2368 --type=NodePort 63 kubectl get service 64 minikube service ghost 65 minikube service ghost 66 clear 67 kubectl run nginx --image=nginx 68 kubectl expose deployments nginx --port=80 --type=NodePort 69 kubectl get pods 70 kubectl get services 71 minikube service nginx 72 kubectl get pods 73 kubectl exec -ti ghost-2663835528-z5sgd -- /bin/bash 74 kubectl logs ghost-2663835528-z5sgd 75 kubectl get pods 76 kubectl logs nginx-2371676037-kgfr3 77 clear 78 minikube dashboard 79 kubectl get pods 80 minikube ssh 81 minikube logs 82 clear 83 kubectl get pods 84 vi foobar.yml 85 kubectl create -f foobar.yml 86 kubectl get pods 87 kubectl get pods foobar -o yaml 88 kubectl get pods foobar -o yaml | more 89 kubectl get pods foobar -o json 90 kubectl get pods foobar -o json | jq -r 91 kubectl get pods foobar -o json | jq -r .spec 92 kubectl get pods foobar -o json | jq -r .spec.nodeName 93 clear 94 minikube ssh 95 clear 96 kubectl get 
97 clear 98 kubectl get pods 99 kubectl delete pods foobar 100 kubectl get pods 101 kubectl get pods -v=9 ghost-2663835528-z5sgd 102 clear 103 curl -k -v -XGET -H "Accept: application/json" -H "User-Agent: kubectl/v1.7.0 (darwin/amd64) kubernetes/d3ada01" https://192.168.99.100:8443/api/v1/namespaces/default/pods/ghost-2663835528-z5sgd 104 minikube ssh 105 clear 106 kubectl get ns 107 kubectl get namespace 108 kubectl get pods --all-namespaces 109 kubectl create ns oreilly 110 kubectl get namespace 111 vi foobar.yml 112 kubectl create -f foobar.yml 113 vi foobar.yml 114 kubectl create -f foobar.yml 115 kubectl get pods --all-namespaces 116 ls -l 117 git remote -v 118 cd manifests/ 119 ls -l 120 git status -s 121 more quota.yaml 122 kubectl create -f quota.yaml 123 kubectl get pods -n oreilly 124 cd .. 125 vi foobar.yml 126 kubectl create -f foobar.yml 127 clear 128 kubectl create -f https://raw.githubusercontent.com/kubernetes/examples/master/guestbook/all-in-one/guestbook-all-in-one.yaml 129 kubectl get pods 130 kubectl get services 131 kubectl edit service frontend 132 kubectl get pods 133 kubectl get pods 134 kubectl exec -ti redis-master-1068406935-vndx7 -- redis-cli info 135 kubectl get pods 136 kubectl scale deployments redis-slave --replicas=5 137 kubectl get pods 138 kubectl get pods 139 kubectl exec -ti redis-master-1068406935-vndx7 -- redis-cli info 140 kubectl get pods 141 minikube service frontend 142 clear 143 ls -l 144 cd manifests/ 145 ls -l 146 cd rs/ 147 ls -l 148 clear 149 more 150 ls -l 151 pwd 152 more rs-example.yml 153 vi rs-example.yml 154 git rs-example.yml 155 git add rs-example.yml 156 git commit -m "fix rs example" 157 git push 158 more rs-example.yml 159 kubectl create -f rs-example.yml 160 kubectl get rs 161 kubectl delete deployments ghost nginx redis redis-master redis-slave 162 kubectl get rs 163 kubectl delete deployments frontend 164 kubectl get rs 165 kubectl getpods 166 kubectl get pods 167 kubectl edit rs foo 168 kubectl get 
pods 169 kubectl edit rs foo 170 kubectl get pods 171 kubectl get pods --show-labels 172 more rs-example.yml 173 kubectl edit rs foo 174 kubectl get pods 175 tmux 176 kubectl get pods -l run=nginx 177 kubectl get pods -l run=nginx -o json | more 178 kubectl get pods -l run=nginx -o json | jq -r [].status.podIP 179 kubectl get pods -l run=nginx -o json | jq -r [].status 180 kubectl get pods -l run=nginx -o json | jq -r .items.status 181 kubectl get pods -l run=nginx -o json | jq -r .items 182 kubectl get pods -l run=nginx -o json | jq -r .items.podIP 183 kubectl get pods -l run=nginx -o json | jq -r .items.status.podIP 184 kubectl get pods -l run=nginx -o json | jq -r .items.status 185 kubectl get pods -l run=nginx -o json | jq -r .items[].status 186 kubectl get pods -l run=nginx -o json | jq -r .items[].status.podIP 187 kubectl get svc 188 kubectl get endpoints 189 kubectl get pods -l run=nginx -o json | jq -r .items[].status.podIP 190 kubectl scale deployments nginx --replicas=5 191 kubectl get pods -l run=nginx -o json | jq -r .items[].status.podIP 192 kubectl get pods -l run=nginx -o json | jq -r .items[].status.podIP 193 kubectl get pods 194 kubectl get pods -l run=nginx -o json | jq -r .items[].status.podIP 195 kubectl get endpoints 196 clear 197 cd .. 198 ls -l 199 clear 200 ls -l 201 cd .. 
202 ls -l 203 clear 204 clear 205 clear 206 minikube status 207 kubectl get pods 208 clear 209 minikube status 210 kubectl get nodes 211 kubectl get 212 kubectl run ghost --image=ghost 213 kubectl expose deployments ghost --port=2368 --type=NodePort 214 clear 215 kubectl get pods 216 kubectl get replicaset 217 kubectl get pods -LRUN 218 kubectl get pods -Lrun 219 kubectl get svc 220 kubectl get endpoints 221 kubectl get pods -Lrun 222 minikube service ghost 223 clear 224 kubectl get pods 225 kubectl get pods ghost-2663835528-k9859 -o yaml 226 kubectl get pods ghost-2663835528-k9859 -o json 227 kubectl get pods ghost-2663835528-k9859 -o json | jq -r 228 kubectl get pods ghost-2663835528-k9859 -o json | jq -r.status.podIP 229 kubectl get pods ghost-2663835528-k9859 -o json | jq -r.items[].status.podIP 230 kubectl get pods ghost-2663835528-k9859 -o json | jq -r.items.status.podIP 231 kubectl get pods ghost-2663835528-k9859 -o json | jq -r .status 232 kubectl get pods ghost-2663835528-k9859 -o json | jq -r .status.podIP 233 clear 234 kubectl get pods 235 kubectl get pods ghost-2663835528-k9859 -o yaml | more 236 kubectl run --help 237 clear 238 kubectl get pods 239 kubectl get deployments 240 kubectl scale deployments ghost --replicas=5 241 kubectl get pods 242 kubectl get pods --watch 243 kubectl get pods 244 clear 245 kubectl set image deployment ghost ghost=ghost:0.9 246 kubectl get pods 247 kubectl get pods --watch 248 kubectl get pods 249 kubectl get pods 250 kubectl get pods 251 kubectl get pods --watch 252 kubectl get rs 253 kubectl get rs 254 kubectl get pods 255 kubectl set image deployment ghost ghost=ghost:08 256 kubectl get pods 257 kubectl get pods --watch 258 kubectl get rs 259 kubectl rollout history deployment ghost 260 kubectl rollout undo deployment ghost --to-revision=1 261 kubectl get rs 262 kubectl get rs 263 kubectl get rs 264 kubectl get rs 265 kubectl get rs 266 kubectl get rs 267 kubectl rollout history deployment ghost 268 kubectl annotate 
deployment ghost kubernetes.io/change-cause="oreilly demo" 269 kubectl rollout history deployment ghost 270 kubectl get deployments ghost -o yaml | more 271 clear 272 kubectl rollout history deployment ghost 273 kubectl set image deployment ghost ghost=ghost:0.8 274 kubectl rollout history deployment ghost 275 ls -l 276 cd manifests/ 277 ls -l 278 cd ingress-controller/ 279 ls -l 280 more ghost.yaml 281 kubectl get svc 282 kubectl edit svc ghost 283 kubectl get svc 284 minikube service ghost 285 kubectl get pods --all-namespaces 286 minikube addons list 287 minikube addons enabled ingress 288 minikube addons enable ingress 289 clear 290 kubectl get pods --all-namespaces 291 kubectl create -f ghost.yaml 292 kubectl get ingress 293 kubectl get pods 294 kubectl run game --image=runseb/2048 295 kubectl expose deployment game --port=80 296 kubectl get pods 297 ls -l 298 more game.yaml 299 kubectl get pods 300 kubectl get pods --watch 301 kubectl get pods 302 more game.yaml 303 kubectl get pods 304 kubectl get svc 305 ls- l 306 ls -l 307 kubectl get pods --all-namespaces 308 kubectl exec -ti nginx-ingress-controller-p4gc1 -n kube-system -- /bin/bash 309 clear 310 kubectl get pods nginx-ingress-controller-p4gc1 -n kube-system -o yaml | more 311 kubectl get ingress 312 more game.yaml 313 kubectl create -f game.yaml 314 kubectl get ingress 315 kubectl get ingress 316 pwd 317 apiVersion: extensions/v1beta1 318 kind: Ingress 319 metadata: 320 name: game 321 spec: 322 rules: 323 - host: game.192.168.99.100.nip.io 324 http: 325 paths: 326 - backend: 327 serviceName: game 328 servicePort: 80 329 pwd 330 clear 331 kubectl run mysql --image=mysql:5.5 --env MYSQL_ROOT_PASSWORD=root 332 kubectl run wordpress --image=wordpress --env WORDPRESS_DB_HOST=mysql --env WORDPRESS_DB_PASSWORD=root 333 kubectl expose deployment mysql --port=3306 334 kubectl get svc 335 kubectl expose deployment wordpress --port=80 --type=NodePort 336 kubectl get svc 337 kubectl get pods 338 kubectl scale 
deployments ghost --replicas=1 339 clear 340 kubectl get pods 341 kubectl exec -ti mysql-3678996555-7b50q -- mysql -uroot -p 342 clear 343 kubectl get pods 344 minikube service wordpress 345 minikube service wordpress 346 kubectl get pods 347 kubectl exec -ti mysql-3678996555-7b50q -- mysql -uroot -p 348 kubectl get pods 349 kubectl get pods mysql-3678996555-7b50q -o yaml |more 350 ls -l 351 kubectl create secret generic mysql --from-literal=password=foobar 352 kubectl get secrets 353 ls -l 354 cd ... 355 cd .. 356 ls -l 357 cd wordpress/ 358 ls -l 359 more mysql-secret.yaml 360 pwd 361 kubectl create -f mysql-secret.yaml 362 kubectl get pods 363 kubectl exec -ti mysql-secret -- mysql -uroot -p 364 ls-l 365 ls -l 366 kubectl get secrets 367 kubectl get secrets mysql -o yaml 368 echo "Zm9vYmFy" | base64 --decode 369 kubectl get psp 370 clear 371 kubectl get pods 372 cd .. 373 ls -l 374 cd volumes/ 375 ls -l 376 more volumes.yaml 377 containers: 378 - image: busybox 379 command: 380 - sleep 381 - "3600" 382 volumeMounts: 383 - mountPath: /busy 384 name: test 385 imagePullPolicy: IfNotPresent 386 name: busy 387 - image: busybox 388 command: 389 - sleep 390 - "3600" 391 volumeMounts: 392 - mountPath: /box 393 name: test 394 imagePullPolicy: IfNotPresent 395 name: box 396 restartPolicy: Always 397 more volumes.yaml 398 kubectl create -f volumes.yaml 399 kubectl get pods 400 kubectl get pods 401 kubectl exec -ti vol -c busy -- ls -l /busy 402 kubectl exec -ti vol -c busy -- touch /busy/foobar 403 kubectl exec -ti vol -c busy -- ls -l /busy 404 kubectl exec -ti vol -c box -- ls -l /box 405 more volumes.yaml 406 clear 407 ls -l 408 more pvc.yaml 409 kubectl get pv 410 kubectl get pvc 411 kubectl get persistentvolume 412 kubectl create -f pvc.yaml 413 kubectl get pvc 414 kubectl get pv 415 ls -l 416 more hostpath.yaml 417 kubectl create -f hostpath.yaml 418 kubectl get pods 419 kubectl get pods 420 kubectl exec -ti hostpath -- ls -l /bitnami 421 kubectl exec -ti hostpath -- 
echo "dynamic storage rocks" > /bitnami/claim 422 kubectl exec -ti hostpath -- touch /bitnami/storage 423 kubectl exec -ti hostpath -- ls -l /bitnami 424 kubectl delete pods hostpath 425 kubectl get pods 426 kubectl get pods 427 kubectl get pods 428 kubectl get pods 429 kubectl get pods 430 kubectl get pods 431 kubectl get pods 432 kubectl get pvc 433 kubectl get pv 434 kubectl get pv pvc-3359d727-68c2-11e7-a55e-080027b13bd8 -o yaml | more 435 minikube ssh 436 kubectl get pods 437 ls -l 438 cd .. 439 ls -l 440 history 441 ls -l 442 cd wordpress/ 443 ls -l 444 more mysql.yaml 445 cd .. 446 l s-l 447 ls -l 448 cd volumes/ 449 ls -l 450 vi config.js 451 kubectl create configmap test --from-file=config.js 452 kubectl get configmaps 453 kubectl get cm 454 kubectl get cm test -o yaml 455 clear 456 kubectl get pods 457 ls -l 458 vi foobar.yaml 459 which helm 460 helm init 461 kubectl get pods --all-namespaces 462 helm repo list 463 helm search redis 464 helm install stable/redis 465 helm ls 466 kubectl get pods 467 kubectl get svc 468 kubectl get pvc 469 kubectl get pv 470 kubectl get deployments 471 kubectl get secrets 472 clear 473 ipython 474 clear 475 cd.. 476 ls -l 477 cd .. 478 ls -l 479 cd tpr 480 ls -l 481 clear 482 ls -l 483 kubectl get thridpartyresources 484 more bananas.yaml 485 kubectl create -f bananas.yaml 486 kubectl get thridpartyresources 487 kubectl get thridpartyresources 488 kubectl get thridpartyresource 489 kubectl get thirdpartyresource 490 minikube ssh 491 ls-l 492 ls -l 493 more kubie.yaml 494 more custom-example.yaml 495 minikube ssh 496 more bananas.yaml 497 vi foobar.yaml 498 kubectl create -f foobar.yaml 499 kubectl get kubecons 500 kubectl get kubecons ortwin -o yaml 501 clear 502 cd .. 503 cd .. 
504 cd history/ 505 ls -l 506 cat history > 14072017.txt 507 history > 14072017.txt ================================================ FILE: history/16022016/history-1.txt ================================================ 307 cd manifests/ 308 ls -l 309 cd ingress-controller/ 310 ls -l 311 more backend.yaml 312 kubectl create -f backend.yaml 313 clear 314 kubectl get pods 315 kubectl delete deployments game 316 kubectl delete deployments ghost 317 kubectl get pods 318 kubectl get pods 319 kubectl run game --image=runseb/2048 320 kubectl expose deployments game --port=80 321 kubectl delete svc game 322 kubectl expose deployments game --port=80 323 kubectl get svc game -o yaml 324 clear 325 kubectl get svc 326 kubectl get svc ghost -o json | jq -r .spec.type 327 kubectl get pods 328 kubectl get pods game-1597610132-qv0z2 | jq -r .spec.containers[0].image 329 kubectl get pods game-1597610132-qv0z2 | jq -r .spec.containers 330 kubectl get pods game-1597610132-qv0z2 | jq -r .spec 331 kubectl get pods game-1597610132-qv0z2 | jq -r .items[0] 332 kubectl get pods game-1597610132-qv0z2 | jq -r 333 kubectl get pods game-1597610132-qv0z2 -o json | jq -r 334 kubectl get pods game-1597610132-qv0z2 -o json | jq -r .spec 335 kubectl get pods game-1597610132-qv0z2 -o json | jq -r .spec.containers[0].image 336 clear 337 kubectl get pods 338 ls -l 339 cat game.yaml 340 kubectl get svc game -o yaml 341 kubectl create -f game.yaml 342 clear 343 kubectl get ingress 344 kubectl get pods 345 kubectl run mysql --image=mysql:5.5 --env=MYSQL_ROOT_PASSWORD=root 346 kubectl create secret generic mysql --from-literal=MYSQL_ROOT_PASSWORD=root 347 kubectl get secrets 348 kubectl get secrets mysql -o yaml 349 kubectl label secret mysql foo=bar 350 kubectl get secrets mysql -o yaml 351 clear 352 kubectl get secrets 353 kubectl get pods 354 kubectl exec -ti mysql-3779901834-c7nc8 -- mysql -uroot -p 355 cd .. 
356 ls -l 357 more mysql.yaml 358 more mysql-secret.yaml 359 kubectl get secrets 360 kubectl create -f mysql-secret.yaml 361 kubectl get pods 362 clear 363 kubectl get pods 364 more mysql-secret.yaml 365 kubectl describe pods mysql 366 more mysql-secret.yaml 367 vi mysql-secret.yaml 368 kubectl create -f mysql-secret.yaml 369 kubectl delete pods mysql 370 clear 371 kubectl create -f mysql-secret.yaml 372 kubectl get pods 373 kubectl get secrets mysql -o yaml 374 kubectl delete secret mysql 375 kubectl create secret generic mysql --from-literal=password=root 376 kubectl get pods 377 kubectl delete pods mysql 378 kubectl create -f mysql-secret.yaml 379 kubectl get pods 380 clear 381 kubectl get pods 382 kubectl get pods mysql -o yaml 383 kubectl get pods mysql -o yaml | more 384 clear 385 kubectl get pods 386 l s-l 387 l s-l 388 ls -l 389 cd ingress-controller/ 390 ls -l 391 more wordpress.yaml 392 cd .. 393 ls -l 394 clear 395 kubectl get pds 396 kubectl get pods 397 kubectl delete pods mysql 398 kubectl get deployments 399 kubectl get svc 400 kubectl expose deployments mysql --port=3306 401 kubectl run wordpress --image=wordpress 402 kubectl expose deployments wordpress --port=80 403 kubectl get pds 404 kubectl get pods 405 cd ingress-controller/ 406 ls -l 407 more wordpress.yaml 408 kubectl create -f wordpress.yaml 409 kubectl get ingress.yaml 410 kubectl get ingress 411 kubectl delete ingress game 412 kubectl get ingress 413 clear 414 kubectl get ingress 415 kubectl get secret 416 kubectl get deployments 417 kubectl get pods 418 kubectl get svc 419 kubectl delete svc game, ghost 420 kubectl delete svc game ghost 421 kubectl get svc 422 kubectl get pods --watch 423 clear 424 kubectl get pods 425 kubectl get ingress 426 clear 427 ls -l 428 kubectl get pods 429 ls -l 430 more wordpress.yaml 431 kubectl get ingress 432 kubectl get svc 433 kubectl get deployments 434 kubectl get pods 435 kubectl logs -f wordpress-468622735-p57t0 436 kubectl get pods 437 kubectl delete 
deployments wordpress 438 kubectl run wordpress --image=wordpress --env=WORDPRESS_DB_PASSOWRD=root --env=MYSQL_DB_HOST=mysql 439 kubectl get pods 440 kubectl get pods 441 kubectl get pods 442 kubectl get pods 443 kubectl edit deployments wordpress 444 kubectl get pods 445 kubectl get rs 446 kubectl get pods 447 kubectl describe pods wordpress-2537937599-zg2bt 448 kubectl edit deployments wordpress 449 kubectl get rs 450 kubectl get pods 451 kubectl exec -ti mysql-3779901834-c7nc8 /bin/bash 452 clear 453 kubectl get pv 454 kubectl get persistentvolumes 455 kubectl get persistentvolumeclaim 456 kubectl get pvs 457 kubectl get pvc 458 cd .. 459 ls -l 460 more volumes.yaml 461 kubectl create -f volumes.yaml 462 kubectl get pods 463 kubectl exec -ti vol -c busy /bin/sh 464 kubectl exec -ti vol -c box -- ls -l /box 465 kubectl exec -ti vol -c box -- ls -l /box 466 kubectl exec -ti vol -c box /bin/sh 467 kubectl exec -ti vol -c busy /bin/sh 468 cler 469 clear 470 kubectl get pods 471 kubectl delete pods vol 472 kubectl get pods 473 which helm 474 helm init 475 kubectl get pods --all-namespaces 476 kubectl get pods --all-namespaces 477 helm ls 478 helm repo ls 479 helm repo list 480 helm install stable/minio 481 clear 482 kubectl get pods 483 kubectl get secrets 484 kubectl get svc 485 kubectl get deployments 486 kubectl get pvc 487 kubectl get pv 488 kubectl describe svc busted-ocelot-minio 489 kubectl edit svc busted-ocelot-minio 490 kubectl describe svc busted-ocelot-minio 491 clear 492 kubectl get pv 493 clear 494 ipython 495 ls -l 496 cd .. 
497 ls -l 498 cd scripts/ 499 ls -l 500 more create_pod.py 501 ./create_pod.py 502 kubectl get pods ================================================ FILE: history/16022016/history.txt ================================================ minikube status 62 kubectl get pods 63 kubectl get nodes 64 kubectl run game --image=runseb/2048 65 kubectl get deployments 66 kubectl get rs 67 kubectl get rs game-1597610132 -o yaml |more 68 cler 69 clear 70 kubectl get pods --show-labels 71 kubectl expose deployments game --port=80 --type=NodePort 72 kubectl get svc 73 kubectl describe svc game 74 minikube ssh 75 clear 76 minikube 77 minikube ip 78 minikube service game 79 clear 80 kubectl run ghost --image=ghost --record 81 kubectl get deployments 82 kubectl get deployments ghost -o yaml | more 83 clear 84 kubectl rollout history deployments ghost 85 kubectl get pods 86 kubectl expose deployment/ghost --port=2368 --type=NodePort 87 kubectl get svc 88 kubectl get pods 89 minikube service ghost 90 kubectl set image deployment/ghost ghost=ghost:09 91 kubectl get pods 92 kubectl get pods 93 kubectl rollout history deployment/ghost 94 kubectl rollout history deployment/ghost undo 95 kubectl rollout history deployments ghost undo 96 kubectl rollout history undo deployments ghost 97 clear 98 kubectl get pods 99 kubectl get pods --watch 100 kubectl rollout history deployments ghost 101 kubectl get rs 102 kubectl rollout undo deployment/ghost 103 kubectl get pods 104 kubectl scale deployment ghost --replicas=5 105 kubectl get pods 106 kubectl get pods 107 kubectl get pods 108 kubectl get pods 109 kubectl rollout history deployment/ghost 110 kubectl set image deployment/ghost ghost=ghost:0.9 111 kubectl get pods 112 kubectl get pods --watch 113 kubectl get pods 114 kubectl get pods --watch 115 clear 116 kubectl get rs 117 kubectl get rs 118 kubectl get pods 119 kubectl get deployment ghost -o yaml |more 120 clear 121 kubectl get pods 122 kubectl get pods ghost-3487275284-6tr23 -o yaml | more 
123 clear 124 kubectl get rs 125 kubectl rollout history deployment/ghost 126 kubectl rollout deployment/ghost --to-revision=3 127 kubectl rollout --help 128 clear 129 kubectl rollout history --help 130 kubectl rollout history deployment/ghost 131 kubectl rollout history deployment/ghost --revision=3 132 clear 133 kubectl get rs --watch 134 kubectl get rs --watch 135 kubectl rollout history deployment/ghost 136 kubectl get rs --watch 137 kubectl rollout history deployment/ghost --revision=3 138 clear 139 kubectl get rs --watch 140 kubectl get pods 141 kubectl rollout history deployment/ghost 142 kubectl rollout history --help 143 kubectl rollout undo --help 144 kubectl rollout undo deployment/ghost --to-revision=3 145 clear 146 kubectl get rs --watch 147 kubectl get pods 148 kubectl get pods ghost-943298627-8p64g -o yaml |more 149 kubectl rollout --help 150 minikube service ghost 151 clear 152 kubectl edit deployments ghost 153 kubectl rollout history deployments ghost 154 kubectl get pods 155 kubectl get svc 156 kubectl edit svc ghost 157 kubectl get rs 158 kubectl completion --help 159 kubectl completion bash ================================================ FILE: history/16052017/history.txt ================================================ 12 which minikube 13 minikube version 14 minikube status 15 minikube start 16 kubectl get pods 17 kubectl delete deployments ghost 18 kubectl get pods --all-namespaces 19 minikube addons disable heapster 20 minikube addons list 21 minikube addons disable ingress 22 minikube addons list 23 minikube stop 24* minikube del 25 minikube start 26 clear 27 whcih gcloud 28 which gcloud 29 gcloud container clusters list 30 gcloud components update 31 clear 32 ls -l 33 clear 34 minikube status 35 kubectl get nodes 36 clear 37 clear 38 gcloud container clusters list 39 gcloud container clusters create foobar 40 kubectl config use-context minikube 41 kubect get nodes 42 kubectl get nodes 43 clear 44 kubectl get nodes 45 minikube ip 46 
kubectl config view 47 clear 48 kubectl run ghost --image=ghost:0.9 49 kubectl expose deployments ghost --port=2368 --type=NodePort 50 kubectl get pods 51 kubectl get pods --watch 52 kubectl get pods 53 kubectl get service 54 minikube service ghost 55 clear 56 minikube ssh 57 kubectl get pods 58 kubectl logs ghost-3503942313-7rdcb 59 kubectl exec -ti ghost-3503942313-7rdcb -- /bin/bash 60 clear 61 minikube 62 minikube dashboard 63 clear 64 kubectl get replicasets 65 kubectl get pods 66 kubectl get replicasets 67 kubectl get service 68 clear 69 kubectl get replicasets 70 kubectl scale replicasets ghost --replicas=5 71 kubectl scale replicasets ghost-3503942313 --replicas=5 72 kubectl get replicasets 73 kubectl get replicasets 74 kubectl get replicasets 75 clear 76 kubectl scale deployments ghost --replicas=5 77 kubectl get replicasets 78 kubectl get replicasets 79 kubectl get pods 80 kubectl scale deployments ghost --replicas=2 81 kubectl get replicasets 82 kubectl get pods 83 clear 84 minikube ssh 85 clear 86 kubectl get pods 87 kubectl get po 88 kubectl get po --all-namespaces 89 kubectl get namespace 90 kubectl get ns 91 kubectl create namespace oreilly 92 kubectl get ns 93 cd .. 
94 ls -l 95 vi foobar.yml 96 kubectl create -f foobar.yml 97 vi foobar.yml 98 kubectl create -f foobar.yml 99 clear 100 kubectl get pods 101 kubectl get pods 102 vi foobar.yml 103 kubectl create -f foobar.yml 104 kubectl get pods --all-namespaces 105 cat foobar.yml 106 kubectl get pods foobar -o yaml | more 107 kubectl get pods foobar -o json | more 108 kubectl get pods foobar -o json 109 kubectl get pods foobar -o json | jq -r 110 kubectl get pods 111 kubectl get svc 112 kubectl get svc ghost -o yaml | more 113 kubectl get svc deployments -o yaml | more 114 kubectl get deployments -o yaml | more 115 kubectl get deployments ghost -o yaml | more 116 q 117 clear 118 minikube ssh 119 clear 120 kubectl get --v=99 pods ghost 121 kubectl get --v=99 deployments ghost 122 kubectl delete --v=99 deployments ghost 123 kubectl get deployments 124 clear 125 kubectl get pods 126 vi game.yml 127 kubectl create -f game.yml 128 vi game.yml 129 kubectl create -f game.yml 130 kubectl get deployments 131 kubectl get pods 132 kubectl get pods --show-labesl 133 clear 134 kubectl get pods --show-labels 135 kubectl get pods -Lapp 136 kubectl get rs 137 kubectl get rs game-3045583940 -o yaml | more 138 kubectl get pods 139 vi game-svc.yml 140 kubectl create -f game-svc.yml 141 clear 142 kubectl get svc 143 kubectl get endpoints 144 kubectl edit svc game 145 kubectl get svc 146 minikube service game 147 ls-l 148 ls -l 149 more game.yml 150 more game-svc.yml 151 kubectl scale deployments game --replicas=10 152 kubectl get pods 153 kubectl get pods 154 kubectl get pods 155 kubectl get pods --watch 156 kubectl get pods 157 kubectl get endpoints 158 kubectl get endpoints game -o yaml | more 159 vi game-svc.yml ================================================ FILE: history/21022017/history.txt ================================================ clear 304 which minikube 305 minikube version 306 minikube start 307 kubectl get nodes 308 clear 309 kubectl get nodes 310 minikube dashboard 311 kubectl 
get pods 312 kubectl get deployments 313 kubectl get replicasets 314 kubectl get pods 315 kubectl logs redis-3133791336-392v8 316 clear 317 kubectl exec -ti redis-3133791336-392v8 -- redis-cli 318 clear 319 minikube ssh 320 cler 321 clear 322 kubectl get pods 323 kubectl get pods redis-3133791336-392v8 -o yaml 324 clesr 325 clear 326 ls -l 327 vi foobar.yaml 328 kubectl create -f foobar.yaml 329 kubectl get pods 330 kubectl get pods 331 kubectl get pods 332 kubectl scale deployments redis --replicas=5 333 kubectl get pods 334 kubectl get pods 335 kubectl scale deployments redis --replicas=2 336 kubectl get pods 337 kubectl delete pods foobar 338 kubectl get pods 339 clear 340 kubectl get pods 341 kubectl get pods 342 kubectl get pods --watch 343 kubectl get pods 344 kubectl delete pods redis-3133791336-392v8 345 kubectl get pods 346 kubectl get pods 347 clear 348 minikube ssh 349 clear 350 kubectl get ns 351 kubectl get namespaces 352 kubectl get pods --all-namespaces 353 kubectl get pods 354 kubectl get pods --all-namespaces 355 kubectl get deployments --all-namespaces 356 kubectl get rc --all-namespaces 357 clear 358 kubectl get ns 359 kubectl create ns foobar 360 kubectl get ns 361 vi foobar.yaml 362 kubectl create -f foobar.yaml 363 kubectl get pods 364 kubectl get pods --all-namespaces 365 kubectl create -f foobar.yaml 366 vi foobar.yaml 367 kubectl create -f foobar.yaml 368 clear 369 kubectl get pods --all-namespaces 370 kubectl get nodes 371 kubectl get nodes minikube -o yaml 372 minikube ssh 373 clear 374 ls -l 375 kubectl --v=99 get pods 376 kubectl --v=99 get pods --namespace=foobar 377 clear 378 kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/examples/guestbook/all-in-one/guestbook-all-in-one.yaml 379 kubectl get pods 380 kubectl get pods 381 kubectl get deployments 382 kubectl delete deployments redis 383 kubectl get deployments 384 clear 385 kubectl get pods 386 kubectl get svc 387 kubectl get services 388 kubectl edit 
svc frontend 389 kubectl get services 390 kubectl get pods 391 kubectl get pods --watch 392 kubectl get pods 393 kubectl get endpoints 394 kubectl get pods 395 minikube service frontend 396 clear 397 kubectl scale deployments redis-slave --replicas=5 398 kubectl get pods 399 kubectl exec -ti redis-master-343230949-816xl -- redis-cli info 400 clear 401 kubectl get pods 402 kubectl get pods --show-labels 403 kubectl get pods -l tier=backend 404 kubectl get pods -Ltier 405 kubectl get pods -l tier=frontend 406 clear 407 kubectl get pods 408 kubectl label pod foobar oreilly=rocks 409 kubectl get pods -l oreilly 410 kubectl get pods -Loreilly 411 clear 412 kubectl get rs 413 kubectl get rs frontend-88237173 -o yaml | more 414 kubectl get pods 415 kubectl label pod frontend-88237173-5l21n tier=broken 416 kubectl label pod frontend-88237173-5l21n tier=broken --overwrite 417 kubectl get pods 418 kubectl get pods -Ltier 419 kubectl get pods -l tier=broken 420 clear 421 kubectl get pods 422 kubectl delete pods frontend-88237173-5l21n 423 kubectl get pods 424 kubectl scale deployments redis-slave --replicas=2 425 clear 426 kubectl get pods 427 kubectl get svc 428 kubectl get svc frontend -o json 429 kubectl get svc frontend -o json | jq -r 430 kubectl get svc frontend -o json | jq -r .spec.clusterIP 431 kubectl get endpoints 432 kubectl get pods -l app=guestbook -l tier=frontend 433 kubectl get pods -l app=guestbook -l tier=frontend -o json | jq -r .spec.podIP 434 kubectl get pods -l app=guestbook -l tier=frontend -o json 435 kubectl get pods -l app=guestbook -l tier=frontend -o json | jq -r .spec.status.podIP 436 clear 437 kubectl get pods -l app=guestbook -l tier=frontend -o json | jq -r .items.spec.status.podIP 438 kubectl get pods -l app=guestbook -l tier=frontend -o json 439 kubectl get pods -l app=guestbook -l tier=frontend -o json | grep podIP 440 kubectl get endpoints 441 kubectl get pods -l app=guestbook -l tier=frontend -o json | jq -r .items[0].status.podIP 442 
clear 443 ls -l 444 kubectl get pods 445 kubectl get pods -l app=guestbook -l tier=frontend -o json | jq -r .items[].status.podIP 446 kubectl run busybox --image=busybox --command sleep 3600 447 kubectl get pods 448 kubectl get pods 449 kubectl get pods 450 clear 451 kubectl get pods 452 kubectl exec -ti busybox-1418042613-qn8m6 -- nslookup frontend 453 kubectl get svc 454 kubectl exec -ti busybox-1418042613-qn8m6 -- nslookup redis-master ================================================ FILE: history/22082017/22082017.txt ================================================ 21 minikube start 22 kubectl get pods 23 clear 24 clear 25 minikube status 26 kubectl get pods 27 kubectl run ghost --image=ghost 28 kubectl expose deployment ghost --port=2368 --type=NodePort 29 kubectl get pods 30 kubectl get pods -w 31 kubectl get pods 32 minikube service ghost 33 minikube service ghost 34 clear 35 which minikube 36 minikube 37 clear 38 minikube status 39 minikube ip 40 minikube 41 clear 42 minikube docker-env 43 eval $(minikube docker-env) 44 docker ps 45 coker ps 46 docker ps 47 clear 48 clear 49 clear 50 minikube dashboard 51 kubectl get nodes 52 kubectl get pods 53 vi pod.yaml 54 kubectl create -f pod.yaml 55 kubectl get pods 56 more pod.yaml 57 kubectl get pods foobar 58 kubectl get pods foobar -o yaml 59 more pod.yaml 60 kubectl get pods 61 kubectl get pods ghost-1255708890-1qc4n -o yaml 62 clear 63 kubectl get pods 64 kubectl logs redis-2913962463-jhwtt 65 kubectl exec -ti redis-2913962463-jhwtt -- /bin/sh 66 clear 67 kubectl get pods 68 minikube ssh 69 clear 70 kubectl get pods 71 kubectl -v=9 get pods 72 minikube ssh 73 kubectl get pods 74 kubectl get 75 clear 76 kubectl get pods 77 kubectl get pods --all-namespaces 78 kubectl get ns 79 kubectl create -f pod.yaml 80 kubectl get pods 81 kubectl create ns oreilly 82 kubectl get ns 83 kubectl create -f pod.yaml -n oreilly 84 kubectl get pods --all-namespaces 85 clear 86 ls -l 87 cd manifests/ 88 ls -l 89 more quota.yaml 90 
kubectl create -f quota.yaml 91 kubectl get resourcequota -n oreilly 92 kubectl get pods -n oreilly 93 cp ../pod.yaml bar.yaml 94 vi bar.yaml 95 kubectl create -f bar.yaml -n oreilly 96 kubectl edit resourcequota counts 97 kubectl edit resourcequota counts -n oreilly 98 kubectl create -f bar.yaml -n oreilly 99 kubectl get pods -n oreilly 100 kubectl create -f bar.yaml -n oreilly 101 kubectl edit pods bar 102 kubectl edit pods bar -n oreilly 103 clear 104 vi bar.yaml 105 ls-l 106 ls -l 107 kubectl get pods 108 kubectl scale deployment ghost --replicas=5 109 kubectl get pods 110 kubectl get pods 111 kubectl scale deployment ghost --replicas=2 112 kubectl get pods 113 clear 114 ls -l 115 cd rs/ 116 ls -l 117 more rs-example.yml 118 kubectl create -f rs-example.yml 119 kubectl get rs 120 kubectl get pods 121 kubectl logs foo-178pj 122 kubectl logs -t foo-178pj 123 kubectl logs -f foo-178pj 124 minikube logs foo-178pj 125 clear 126 kubectl get pods 127 kubectl exec -ti foo-178pj -- /bin/sh 128 kubectl get pods foo-178pj -o yaml 129 kubectl get pods 130 more rs-example.yml 131 kubectl get pods --show-labels 132 kubectl get pods -Lred 133 kubectl get pods -l red=blue 134 kubectl get pods -l red=blue | grep foo | wc -l 135 kubectl label pods foo-178pj red=green --overwrite 136 kubectl get pods -Lred 137 cd .. 138 ls -l 139 ls -l 140 vi bar.yaml 141 kubectl create -f bar.yaml 142 kubectl create -f bar.yaml -n default 143 kubectl edit resourcequota counts -n oreilly 144 kubectl create -f bar.yaml 145 kubectl get pods --all-namespaces --show-labels 146 kubectl get pods 147 clear 148 clear 149 kubectl get pods 150 kubectl get pods -Lred 151 ls -l 152 cd .. 153 ls -l 154 cd manifests/ 155 ls -l 156 more test.yaml 157 clear 158 ls -l 159 grep -r Service . 
160 cp 1605207/game-svc.yml svc.yaml 161 vi svc.yaml 162 more svc.yaml 163 kubectl create -f svc.yaml 164 kubectl get svc 165 minikube service nginx 166 kubectl get pods 167 kubectl get pods -Lred 168 kubectl get endpoints 169 kubectl edit rs foo 170 kubectl get pods 171 kubectl get pods -Lred 172 kubectl get endpoints ================================================ FILE: history/23052018/history.txt ================================================ 500 vi pod.yaml 503 more pod.yaml 504 kubectl create -f pod.yaml 505 kubectl get nodes --show-labels 506 kubectl label node minikube oreilly=rocks 507 clear 508 clear 509 kubectl get pods -w 510 clear 511 kubectl get pods 512 kubectl delete pods oreilly 513 kubectl delete pods oreilly-2 514 kubectl delete rs oreilly 515 clear 516 kubectl get pods 517 kubectl get pods --show-labels 518 more rs.yaml 519 vi rs.yaml 520 kubectl create -f rs.yaml 521 clear 522 kubectl get rs 523 kubectl get pods 524 kubectl get pods --show-labels 525 vi svc.yaml 526 kubectl create -f svc.yaml 527 kubectl get svc 528 curl 192.168.99.100:31446446 529 clear 530 kubectl get svc 531 kubectl run busy -it --image=busybox -- /bin/sh 532 clear 533 minikube stop 534 minikube delete 535 cd ~/Documents 536 ls -l 537 cd seb/ 538 ls -l 539 cd .. 540 cd .. 
541 ls -l 542 cd gitforks/ 543 ls- l 544 ls -l 545 cd oreilly-kubernetes/ 546 ls -l 547 clear 548 clear 549 clear 550 ls -l 551 minikube status 552 minikube delete 553 minikube start 554 kubectl get pods 555 clear 556 kubectl version 557 clear 558 which minikube 559 which gcloud 560 clear 561 minikube 562 minikube status 563 clear 564 minikube status 565 which kubectl 566 kubectl version 567 kubectl proxy 568 clear 569 tmux 570 clear 571 clear 572 ls -l 573 minikube status 574 minikube start 575 minikube dashboard 576 clear 577 kubectl get pods 578 kubectl get rs 579 kubectl get deployments 580 kubectl scale deployment redis --replicas 4 581 kubectl get deployments 582 kubectl get pods 583 kubectl set image redis redis=redis:4.5 584 kubectl set --help 585 kubectl set image --help 586 clear 587 kubectl set image deployment/redis redis=redis:4.5 588 kubectl get pods 589 kubectl get pods -w 590 kubectl get pods -w 591 kubectl get pods 592 kubectl get pods 593 kubectl get pods 594 kubectl get pods 595 kubectl get pods 596 kubectl get pods 597 kubectl set image deployment/redis redis=redis:3.2 598 kubectl get pods -w 599 clear 600 kubectl get pods 601 kubectl get pods redis-54bb49b6f9-2r652 -o yaml 602 clear 603 kubectl get pods 604 kubectl get pods -o json | jq -r .items[] 605 kubectl get pods -o json | jq -r .items[].spec.containers[0].image 606 kubectl rollout history deployment redis 607 kubectl rollout undo deployment redis --to-revision 2 608 kubectl get pods -o json | jq -r .items[].spec.containers[0].image 609 kubectl get pods 610 kubectl rollout history deployment redis 611 kubectl rollout undo deployment redis --to-revision 1 612 kubectl get pods 613 kubectl get pods 614 kubectl get pods 615 kubectl get pods 616 kubectl get pods 617 kubectl get pods 618 kubectl get pods 619 kubectl get pods 620 kubectl get pods -o json | jq -r .items[].spec.containers[0].image 621 kubectl get pods 622 kubectl get deployments 623 kubectl get rs 624 kubectl rollout history 
deployment redis 625 kubectl rollout undo deployment redis --to-revision 3 626 kubectl get rs 627 kubectl get rs 628 kubectl get rs 629 kubectl get rs 630 kubectl get rs 631 kubectl get rs 632 kubectl rollout history deployment redis 633 kubectl get deployment redis 634 kubectl get deployment redis -o yaml 635 clear 636 vi deploy.yaml 637 cat deploy.yaml 638 kubectl create -f deploy.yaml 639 vi deploy.yaml 640 clear 641 cat deploy.yaml 642 kubectl create -f deploy.yaml 643 kubectl get deployment 644 kubectl get rs 645 kubectl get pods 646 cat deploy.yaml 647 kubectl rollout history deployment redis 648 kubectl edit deployment redis 649 kubectl rollout history deployment redis 650 kubectl set image deployment/redis redis=redis:3.2 651 kubectl rollout history deployment redis 652 kubectl set image deployment/redis redis=redis:3.9 653 kubectl rollout history deployment redis 654 clear 655 kubectl get pods 656 kubectl delete deployment redis 657 kubectl get pods 658 kubectl run game --image=runseb/2048 659 kubectl get deployments 660 kubectl get rs 661 kubectl get pods 662 kubectl get pods 663 kubectl get svc 664 ls -l 665 more svc.yaml 666 kubectl get pods 667 kubectl get pods --show-labels 668 vi svc.yaml 669 kubectl create -f svc.yaml 670 kubectl get svc 671 kubectl expose deployments game --port 80 --type NodePort 672 kubectl delete -f svc.yaml 673 kubectl get svc 674 kubectl create -f svc.yaml 675 kubectl get svc 676 clear 677 minikube ssh 678 clear 679 ls -l 680 clear 681 kubectl get svc 682 minikube service game 683 minikube service ghost 684 clear 685 kubectl create -f https://raw.githubusercontent.com/kubernetes/examples/master/guestbook/all-in-one/guestbook-all-in-one.yaml 686 kubectl get deployments 687 kubectl get svc 688 kubectl edit svc frontend 689 kubectl get svc 690 kubectl get pods 691 kubectl get pods 692 kubectl get pods 693 kubectl get pods 694 kubectl get pods 695 minikube service frontend 696 more svc.yaml 697 clear 698 minikube addons list 699 
kubectl get pods -n kube-system 700 kubectl get pods nginx-ingress-controller-5g4z8 -o yaml -n kube-system 701 kubectl get pods 702 kubectl get svc 703 kubectl edit svc frontend 704 kubectl get svc 705 ls -l 706 cd manifests/ 707 ls -l 708 cd 05-ingress-controller/ 709 ls -l 710 pwd 711 more frontend.yaml 712 vi frontend.yaml 713 vi ghost.yaml 714 vi game.yaml 715 kubectl create -f frontend.yaml 716 kubectl create -f ghost.yaml 717 kubectl create -f game.yaml 718 kubectl get ingress 719 kubectl exec -ti nginx-ingress-controller-5g4z8 -- /bin/sh -n kube-system 720 kubectl exec -ti nginx-ingress-controller-5g4z8 -n kube-system -- /bin/sh 721 clear 722 clear 723 kubectl run mysql --image=mysql:5.5 --env MYSQL_ROOT_PASSWORD=root 724 kubectl expose deployment mysql --port 3306 725 kubectl get pods 726 kubectl delete deployments frontend redis-master redis-slave 727 kubectl get pods 728 kubectl exec -ti mysql-55d65b64bb-qzhk5 -- mysql -uroot -p 729 kubectl run wordpress --image=wordpress --env WORDPRESS_DB_PASSWORD=root --env WORDPRESS_DB_HOST=mysql 730 kubectl expose deployments wordpress --port 80 731 kubectl get pods 732 kubectl get pods 733 ls -l 734 more wordpress.yaml 735 kubectl create -f wordpress.yaml 736 kubectl get ingress 737 kubectl get ingress 738 kubectl get ingress 739 kubectl get ingress 740 kubectl get ingress 741 cat wordpress.yaml 742 pwd 743 history |grep kubectl 744 kubectl get pods 745 kubectl exec -ti mysql-55d65b64bb-qzhk5 -- mysql -uroot -p 746 kubectl exec -ti mysql-55d65b64bb-qzhk5 -- mysql -uroot -p 747 kubectl run -ti busybox --image=busybox -- /bin/sh 748 cler 749 clear 750 kubectl get pods 751 kubectl get pods mysql-55d65b64bb-qzhk5 -o yaml 752 clear 753 kubectl get secrets 754 kubectl create secret generic foobar --from-literal=password=root 755 kubectl get secrets 756 kubectl get secrets foobar -o yaml 757 echo "cm9vdA==" | base64 -D 758 ls -l 759 cd .. 
760 ls -l 761 cd wordpress/ 762 ls -l 763 more mysql-secret.yaml 764 kubectl create -f mysql-secret.yaml 765 kubectl get pods 766 kubectl get pods 767 kubectl logs mysql-secret 768 kubectl describe pods mysql-secret 769 more mysql-secret.yaml 770 kubectl get secrets 771 kubectl edit pod mysql-secret 772 kubectl delete pods mysql-secret 773 vi mysql-secret.yaml 774 kubectl create -f mysql-secret.yaml 775 kubectl get pods 776 kubectl exec -ti mysql-secret -- mysql -uroot -p 777 clear 778 kubectl get pods 779 ls -l 780 cd .. 781 ls -l 782 cd 06-volumes/ 783 ls -l 784 more configmap.yaml 785 ls -l 786 vi foobar.md 787 kubectl create configmap foobar --from-file=foobar.md 788 kubectl get configmap 789 kubectl get configmap foobar -o yaml 790 ls -l 791 more configmap.yaml 792 pwd 793 kubectl create -f configmap.yaml 794 kubectl get pods 795 kubectl exec -ti cm-test -- /bin/sh 796 ls -l 797 more volumes.yaml 798 kubectl create -f volumes.yaml 799 kubectl get pods 800 kubectl exec -ti vol -c busy -- ls -l /busy 801 kubectl exec -ti vol -c busy -- touch /busy/busy 802 kubectl exec -ti vol -c busy -- ls -l /busy 803 kubectl exec -ti vol -c box -- ls -l /box 804 ls -l 805 kubectl get pods 806 kubectl delete pods cm-test vol mysql-secret 807 kubectl get pods 808 kubectl get pods 809 kubectl get pods 810 kubectl get pods 811 kubectl get pods 812 kubectl get pods 813 kubectl get pods 814 kubectl get pods 815 kubectl get pods 816 kubectl get pods 817 kubectl get pods 818 kubectl get pods 819 clear 820 kubectl get pods 821 kubectl get pods 822 kubectl get pods 823 kubectl get pods 824 kubectl get pods 825 kubectl get pods 826 kubectl get pods 827 kubectl get pods 828 clear 829 kubectl get pods 830 kubectl delete pods mysql-55d65b64bb-qzhk5 831 kubectl get pods 832 kubectl get pods 833 ls -l 834 more pvc.yaml 835 kubectl create -f pvc.yaml 836 kubectl get pvc 837 kubectl get pv 838 kubectl get storageclass 839 kubectl get storageclass standard -o yaml 840 kubectl get pv 841 kubectl 
get pv pvc-9a79e0ae-5e95-11e8-9bed-080027dd6acf -o yaml 842 ls -l /tmp/hostpath-provisioner/pvc-9a79e0ae-5e95-11e8-9bed-080027dd6acf 843 clear 844 ls -l 845 kubectl get pvc 846 more mysql.yaml 847 kubectl create -f mysql.yaml 848 kubectl get pods 849 kubectl exec -ti data -- mysql -uroot -p 850 kubectl get pods 851 kubectl delete pods data 852 kubectl get pods 853 kubectl get pods 854 kubectl get pods 855 kubectl get pods 856 minikube ssh 857 kubectl get pods 858 kubectl create -f mysql.yaml 859 kubectl get pods 860 kubectl exec -ti data -- mysql -uroot -p 861 clear 862 kubectl get pods 863 kubectl delete deployments ghost game busybox 864 which helm 865 helm 866 helm init 867 kubectl get pods -n kube-system 868 kubectl get pods -n kube-system -w 869 helm ls 870 helm repo list 871 helm search minio 872 helm inspect stable/minio 873 helm install stable/minio 874 kubectl get pods 875 helm ls 876 helm delete plinking-lambkin 877 helm ls 878 kubectl get pods 879 helm create oreilly 880 cd oreilly/ 881 ls -l 882 tree . 883 cd templates/ 884 ls -l 885 more service.yaml 886 cat ../values.yaml 887 cd .. 890 cd .. 892 cd .. 
894 cd 07-crd/ 897 more fruit.yml 898 kubectl get fruits 899 kubectl create -f fruit.yml 900 kubectl get fruits 901 more mango.yaml 902 more database.yml 903 vi mango.yaml 904 cat db.yml 905 vi mango.yaml 906 cat mango.yaml 907 kubectl create -f mango.yaml 908 kubectl get fruits 909 kubectl get fruit 910 kubectl get fr 911 kubectl get fr mango -o yaml ================================================ FILE: history/7062018/history.txt ================================================ 589 kubectl get pods 590 kubectl logs redis-dff85b6f4-4cs7w 591 kubectl exec -ti redis-dff85b6f4-4cs7w -- /bin/sh 592 clear 593 vi pod.yaml 594 cat pod.yaml 595 kubectl create -f pod.yaml 596 kubectl get pods 597 kubectl exec -ti oreilly -- redis-cli 598 cler 599 clear 600 kubectl get pods 601 kubectl scale deployments redis --replicas 4 602 kubectl get pods 603 kubectl get pods 604 kubectl scale deployments redis --replicas 2 605 kubectl get pods 606 kubectl get pods 607 clear 608 kubectl get pods 609 kubectl delete pods redis-dff85b6f4-4cs7w 610 kubectl get pods 611 kubectl delete pods oreilly 612 kubectl get pods 613 kubectl get pods 614 kubectl get replicasets 615 clear 616 vi rs.yaml 617 cat rs.yaml 618 kubectl get pods 619 kubectl create -f rs.yaml 620 kubectl get pods 621 kubectl delete pods oreilly-pw2rw 622 kubectl get pods 623 kubectl get pods 624 kubectl edit rs oreilly 625 kubectl get pods 626 kubectl get pods 627 kubectl get pods --show-labels 628 kubectl get pods -l app=oreilly 629 clear 630 more pod.yaml 631 more rs.yaml 632 clear 633 kubectl get 634 clear 635 curl localhost:8001 636 curl localhost:8001/api/v1 637 kubectl get pods -v=9 638 clear 639 curl localhost:8001/api/v1/namespaces/default/pods 640 curl localhost:8001/api/v1/namespaces/default/pods | jq -r 641 curl localhost:8001/api/v1/namespaces/default/pods | jq -r .items 642 curl localhost:8001/api/v1/namespaces/default/pods | jq -r .items[].metadata.name 643 curl -XDELETE 
localhost:8001/api/v1/namespaces/default/pods/oreilly-d6rdc 644 kubectl get pods 645 clear 646 clear 647 kubectl get pods 648 kubectl create -f pod.yaml 649 kubectl get pods 650 vi pod.yaml 651 kubectl create -f pod.yaml 652 kubectl get pods 653 kubectl create ns oreilly 654 vi pod.yaml 655 kubectl create -f pod.yaml 656 clear 657 kubectl get pods 658 kubectl get pods --namespace oreilly 659 kubectl get pods 660 kubectl get pods --namespace oreilly -v=9 661 kubectl get pods -v=9 662 clear 663 kubectl get ns 664 kubectl get namespace 665 kubectl create ns foobar 666 kubectl get namespace 667 kubectl get pods --all-namespaces 668 clear 669 kubectl create resourcequota test --hard=pods=6 670 kubectl get resourcequota 671 kubectl get pods 672 vi pod.yaml 673 kubectl create -f pod.yaml 674 kubectl create -f pod.yaml -n oreilly 675 clear 676 kubectl get pods 677 kubectl logs oreilly 678 kubectl describe pods oreilly 679 kubectl get pods -n oreilly 680 kubectl logs oreilly -n oreilly 681 kubectl describe oreilly -n oreilly 682 kubectl describe pods oreilly -n oreilly 683 kubectl logs oreilly -n oreilly 684 kubectl get pods -n oreilly 685 clear 686 kubectl get pods 687 kubectl delete rs oreilly redis 688 kubectl delete pods oreilly 689 kubectl get pods 690 clear 691 vi pod.yaml 692 kubectl create -f pod.yaml 693 kubectl get pods 694 vi svc.yaml 695 cat svc.yaml 696 kubectl create -f svc.yaml 697 kubectl get endpoints 698 kubectl get svc 699 kubectl get service 700 kubectl get endpoints 701 kubectl get pods 702 kubectl get pods --show-labels 703 kubectl label pods game app=game 704 kubectl get pods --show-labels 705 kubectl get endpoints 706 kubectl get svc 707 minikube service game 708 clear 709 kubectl get svc 710 kubectl run -it busybox --image=busybox -- /bin/sh 711 clest 712 clear 713 minikube stop 714 lear 715 clear 716 ls -l 717 clear 718 minikube delete 719 minikube start 720 cler 721 clear 722 kubectl get nodes 723 clear 724 clear 725 minikube status 726 kubectl 
get nodes 727 kubectl get nodes -v=9 728 clear 729 kubectl get pods 730 kubectl get rs 731 kubectl get svc 732 clear 733 minikube dashboard 734 kubectl get pods --all-namespaces 735 kubectl run game --image=runseb/2048 736 kubectl get pods 737 kubectl get rs 738 kubectl expose deployments game --port 80 --type NodePort 739 kubectl get svc 740 minikube service game 741 clear 742 kubectl run ghost --image=ghost 743 kubectl expose deployment ghost --port 2368 --type NodePort 744 kubectl get pods 745 kubectl get pods --show-labels 746 kubectl get pods --show-labels 747 kubectl get svc 748 minikube service ghost 749 clear 750 more pod.yaml 751 more rs.yaml 752 more svc.yaml 753 kubectl run ghost --image=ghost --help 754 clear 755 more rs.yaml 756 clear 757 kubectl get pods 758 kubectl get pods -o yaml game-755c6b9b8c-rm68v --export 759 clear 760 kubectl run --generator=v1/pod game --image=ghost 761 kubectl run --generator=run-pod/v1 game --image=ghost 762 kubectl run --generator=run-pod/v1 game --image=ghost --dry-run 763 kubectl run --generator=run-pod/v1 game --image=ghost --dry-run -o yaml 764 kubectl run ghost --image=ghost 765 kubectl expose deployment ghost --port 2368 --type NodePort 766 cler 767 clear 768 kubectl create ns oreilly 769 kubectl run ghost --image=ghost -n oreilly 770 kubectl get pods --all-namespaces 771 kubectl delete ns oreilly 772 clear 773 kubectl get pods 774 kubectl delete pods game 775 kubectl get pods 776 kubectl set image deployment game game=runseb/4096 777 kubectl get pods 778 kubectl scale deployment game --replicas 4 779 kubectl get pods 780 kubectl get pods 781 kubectl set image deployment game game=nginx 782 kubectl get pods -w 783 kubectl get pods 784 clear 785 kubectl get pods 786 kubectl get pods 787 kubectl set image deployment game game=runseb/4096 788 kubectl get pods 789 kubectl get pods 790 kubectl get pods 791 kubectl get pods -o json |jq -r .items[] 792 kubectl get pods -o json |jq -r .items[].spec 793 kubectl get pods -o 
json |jq -r .items[].spec.containers[0] 794 kubectl get pods -o json |jq -r .items[].spec.containers[0].image 795 kubectl get pods 796 clear 797 kubectl get deployments 798 kubectl get rs 799 kubectl get rs game-5fb9959fbc -o yaml 800 kubectl get rs 801 kubectl get rs game-755c6b9b8c -o yaml 802 kubectl get rs 803 kubectl get rs game-d4bfc6874 -o yaml 804 kubectl get rs 805 kubectl rollout history deployment game 806 kubectl rollout undo deployment game --to-revision 1 807 kubectl get rs 808 kubectl get rs 809 kubectl get rs -w 810 kubectl get pods 811 kubectl get pods -o json |jq -r .items[].spec.containers[0].image 812 kubectl rollout history deployment game 813 kubectl rollout undo deployment game --to-revision 3 814 kubectl get rs -w 815 kubectl get pods 816 kubectl rollout history deployment game 817 kubectl rollout undo deployment game --to-revision 4 818 kubectl get pods 819 kubectl get rs 820 clear 821 kubectl get deployment 822 kubectl get deployment game -o yaml 823 clear 824 kubectl create -f https://raw.githubusercontent.com/kubernetes/examples/master/guestbook/all-in-one/guestbook-all-in-one.yaml 825 kubectl get deployments 826 kubectl get svc 827 kubectl get pods 828 kubectl get pods 829 kubectl rollout history deployment game 830 kubectl rollout undo deployment game --to-revision 5 831 kubectl get pods 832 kubectl scale deployment game --replicas 1 833 kubectl get pods 834 kubectl exec -ti redis-master-55db5f7567-vhnzb -- redis-cli 835 kubectl scale deployment redis-slave --replicas 5 836 kubectl exec -ti redis-master-55db5f7567-vhnzb -- redis-cli info 837 kubectl getpods 838 kubectl get pods 839 kubectl get svc 840 kubectl edit svc frontend 841 kubectl get svc 842 minikube service frontend 843 kubectl edit svc frontend 844 clear 845 kubectl get deployments 846 kubectl get deployments game -o yaml 847 clear 848 minikube addons list 849 kubectl get pods -n kube-system 850 pwd 851 ls -l 852 cd manifests 853 ls -l 854 cd 05-ingress-controller/ 855 ls -l 
856 kubectl get svc 857 kubectl edit svc frontend 858 kubectl edit svc game 859 kubectl edit svc ghost 860 kubectl get svc 861 ls -l 862 more game.yaml 863 more ghost.yaml 864 more frontend.yaml 865 kubectl create -f game.yaml -f ghost.yaml -f frontend.yaml 866 kubectl get ingress 867 nslookup frontend.192.168.99.100.nip.io 868 nslookup game.192.168.99.100.nip.io 869 kubectl get ingress 870 clear 871 kubectl get ingress 872 kubectl get pods -n kube-system 873 kubectl edit ingress frontend 874 ls -l 875 vi frontend.yaml 876 kubectl replace -f frontend.yaml 877 kubectl get pods 878 kubectl get ingress 879 clear 880 kubectl get pods -n kube-system 881 kubectl get pods nginx-ingress-controller-rnncf -o yaml -n kube-system 882 kubectl exec -ti nginx-ingress-controller-rnncf -- /bin/sh -n kube-system 883 kubectl exec -ti nginx-ingress-controller-rnncf -n kube-system -- /bin/sh 884 clear 885 kubectl run mysql --image=mysql:5.5 --env MYSQL_ROOT_PASSWORD=root 886 kubectl expose deployment mysql --port 3306 887 kubectl run wordpress --image=wordpress --env WORDPRESS_DB_HOST=mysql --env WORDPRESS_DB_PASSWORD=root 888 kubectl expose deployment wordpress --port 80 889 kubectl get pods 890 mkdir pkg 891 cd pkg 892 ls -l 893 kubectl get deployment wordpress -o yaml --export > wordpress.yaml 894 vi wordpress.yaml 895 clear 896 cd .. 
897 kubectl get pods 898 ls -l 899 more wordpress.yaml 900 kubectl create -f wordpress.yaml 901 kubectl get ingress 902 kubectl get ingress 903 kubectl get ingress 904 kubectl get ingress 905 clear 906 kubectl get deployments 907 kubectl get pods 908 kubectl exec -ti mysql-55d65b64bb-fhqbd -- mysql -uroot -p 909 clear 910 kubectl get pods 911 kubectl get pods mysql-55d65b64bb-fhqbd -o yaml 912 clear 913 kubectl create secret generic mysql --from-literal=password=root 914 kubectl get secrets 915 kubectl get secrets mysql -o yaml 916 ls -l 917 kubectl create configmap foobar --from-file=ghost.yaml 918 kubectl get configmap 919 kubectl get cm 920 kubectl get cm foobar -o yaml 921 clear 922 kubectl get secret 923 kubectl get cm 924 kubectl get secret mysql -o yaml 925 echo "cm9vdA==" | base64 -D 926 ls -l 927 cd .. 928 ls -l 929 cd wordpress/ 930 ls -l 931 pwd 932 more mysql-secret.yaml 933 kubectl get secrets 934 vi mysql-secret.yaml 935 kubectl create -f mysql-secret.yaml 936 kubectl get pods 937 kubectl exec -ti mysql-secret -- mysql -uroot -p 938 more mysql 939 more mysql-secret.yaml 940 ls -l 941 cd // 942 pwd 943 cd ~/gitforks/oreilly-kubernetes/ 944 ls -l 945 cd manifests/ 946 ls -l 947 cd configmaps/ 948 ls -l 949 pwd 950 more configmap.yaml 951 ls -l 952 more pod.yaml 953 clear 954 kubectl get pods 955 kubectl delete pods mysql-55d65b64bb-fhqbd 956 kubectl get pods 957 kubectl get pods 958 cd .. 
959 ls -l 960 clear 961 cd 06-volumes/ 962 ls -l 963 more volumes.yaml 964 kubectl create -f volumes.yaml 965 kubectl get pods 966 kubectl get pods 967 kubectl delete deployments redis-master redis-slave frontend 968 kubectl delete pods mysql-secret 969 clear 970 kubectl get pods 971 kubectl exec -ti vol -c box -- ls -l /box 972 kubectl exec -ti vol -c busy -- ls -l /busy 973 kubectl exec -ti vol -c busy -- touch /box/foobar 974 kubectl exec -ti vol -c busy -- ls -l /busy 975 kubectl exec -ti vol -c busy -- /bin/sh 976 kubectl exec -ti vol -c busy -- ls -l /busy 977 kubectl exec -ti vol -c box -- ls -l /box 978 kubectl exec -ti vol -c box -- cat /box/foobar 979 more volumes.yaml 980 ls -l 981 clear 982 kubectl get pv 983 kubectl get pvc 984 ls -l 985 more pvc.yaml 986 kubectl create -f pvc.yaml 987 kubectl get pvc 988 kubectl get pv 989 kubectl get storageclass 990 kubectl get storageclass standard -o yaml 991 kubectl get pv 992 kubectl get pv pvc-5d651b5c-6a5f-11e8-825a-08002750803a -o yaml 993 ls -l 994 more mysql.yaml 995 kubectl get pvc 996 clear 997 kubectl get pvc 998 kubectl get pv 999 kubectl create -f mysql.yaml 1000 kubectl get pods 1001 kubectl exec -ti data -- mysql -uroot -p 1002 kubectl get pods 1003 kubectl delete pods data 1004 kubectl get pods 1005 kubectl get pv 1006 kubectl get pods 1007 kubectl create -f mysql.yaml 1008 kubectl get pods 1009 kubectl get pods 1010 kubectl exec -ti data -- mysql -uroot -p 1011 clear 1012 python 1013 ipyton 1014 ipython 1015 y 1016 clear 1017 cd .. 
1018 ls -l 1019 cd 07-crd/ 1020 ls -l 1021 clear 1022 l s-l 1023 ls -l 1024 more fruit.yml 1025 kubectl get fruits 1026 kubectl create -f fruit.yml 1027 kubectl get fruits 1028 more mango.yaml 1029 kubectl create -f mango.yaml 1030 kubectl get fruits 1031 kubectl get fruits mango -o yaml 1032 kubectl get fruits -v=9 1033 clear 1034 kubectl get fruits 1035 kubectl get fruit 1036 pwd 1037 ls -l 1038 clear 1039 which helm 1040 helm repo list 1041 helm search redis 1042 helm install stable/redis 1043 helm init 1044 kubectl get pods --all-namespaces 1045 kubectl get pods --all-namespaces 1046 kubectl get pods --all-namespaces 1047 kubectl get pods --all-namespaces 1048 clear 1049 helm install stable/redis 1050 kubectl get pods 1051 helm ls 1052 ls -l 1053 cd .. 1054 ls-l 1055 ls - 1056 ls -l 1057 helm create oreilly 1058 cd oreilly/ 1059 tree 1060 cat templates/service.yaml 1061 more values.yaml ================================================ FILE: kusto/base/kustomization.yaml ================================================ resources: - pod.yaml ================================================ FILE: kusto/base/pod.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: kusto spec: containers: - name: test image: nginx ================================================ FILE: kusto/overlays/dev/kustomization.yaml ================================================ resources: - ../../base commonLabels: stage: dev ================================================ FILE: kusto/overlays/prod/kustomization.yaml ================================================ resources: - ../../base commonLabels: stage: prod ================================================ FILE: manifests/01-pod/README.md ================================================   # Pod chapter First lab is just a basic pod running a redis cache image. 
File : redis.yaml Second lab adds a namespace "oreilly" and a ResourceQuota > kubectl create ns oreilly File : rq.yaml ================================================ FILE: manifests/01-pod/busybox.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: busybox namespace: default spec: containers: - image: busybox command: - sleep - "3600" imagePullPolicy: IfNotPresent name: busybox restartPolicy: Always ================================================ FILE: manifests/01-pod/foobar.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: foobar namespace: default spec: containers: - image: ghost name: ghost ================================================ FILE: manifests/01-pod/lifecycle.yaml ================================================ kind: Deployment apiVersion: apps/v1beta1 metadata: name: loap spec: replicas: 1 template: metadata: labels: app: loap spec: initContainers: - name: init image: busybox command: ['sh', '-c', 'echo $(date +%s): INIT >> /loap/timing'] volumeMounts: - mountPath: /loap name: timing containers: - name: main image: busybox command: ['sh', '-c', 'echo $(date +%s): START >> /loap/timing; sleep 10; echo $(date +%s): END >> /loap/timing;'] volumeMounts: - mountPath: /loap name: timing livenessProbe: exec: command: ['sh', '-c', 'echo $(date +%s): LIVENESS >> /loap/timing'] readinessProbe: exec: command: ['sh', '-c', 'echo $(date +%s): READINESS >> /loap/timing'] lifecycle: postStart: exec: command: ['sh', '-c', 'echo $(date +%s): POST-START >> /loap/timing'] preStop: exec: command: ['sh', '-c', 'echo $(date +%s): PRE-HOOK >> /loap/timing'] volumes: - name: timing hostPath: path: /tmp/loap ================================================ FILE: manifests/01-pod/multi.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: multi namespace: oreilly spec: containers: - image: busybox command: - sleep - "3600" imagePullPolicy: 
IfNotPresent name: busybox - image: redis name: redis restartPolicy: Always ================================================ FILE: manifests/01-pod/redis.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: redis spec: containers: - image: redis:3.2 imagePullPolicy: IfNotPresent name: mysql restartPolicy: Always ================================================ FILE: manifests/02-quota/README.md ================================================   # Quota chapter First example file is quota.yaml, to create a simple ResourceQuota Second example, rq.yaml, is more complex and will create a namespace, a ResourceQuota and a pod ================================================ FILE: manifests/02-quota/quota.yaml ================================================ apiVersion: v1 kind: ResourceQuota metadata: name: counts namespace: oreilly spec: hard: pods: "1" ================================================ FILE: manifests/02-quota/rq.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: oreilly --- apiVersion: v1 kind: ResourceQuota metadata: name: counts namespace: oreilly spec: hard: pods: "1" --- apiVersion: v1 kind: Pod metadata: name: redis namespace: oreilly spec: containers: - image: redis:3.2 imagePullPolicy: IfNotPresent name: mysql restartPolicy: Always ================================================ FILE: manifests/02-quota/rq.yaml.fmn ================================================ apiVersion: v1 kind: ResourceQuota metadata: name: object-counts spec: hard: pods: 1 ================================================ FILE: manifests/03-rs/README.md ================================================   # ReplicaSet Two files presented as an example. redis-rc.yaml starts a RS with a redis image, with 2 replicas rs-example.yaml starts a RS with an nginx image, with 3 replicas Both are using a label to identify the pods, either an app label, or a "color" label. 
================================================ FILE: manifests/03-rs/redis-rc.yaml ================================================ apiVersion: v1 kind: ReplicationController metadata: name: redis namespace: default spec: replicas: 2 selector: app: redis template: metadata: name: redis labels: app: redis spec: containers: - image: redis:3.2 name: redis ================================================ FILE: manifests/03-rs/rs-example.yml ================================================ apiVersion: extensions/v1beta1 kind: ReplicaSet metadata: name: foo spec: replicas: 3 selector: matchLabels: red: blue template: metadata: name: foo labels: red: blue spec: containers: - image: nginx name: nginx ================================================ FILE: manifests/03-rs/rs.yaml ================================================ apiVersion: apps/v1 kind: ReplicaSet metadata: name: lodh spec: replicas: 5 selector: matchLabels: bank: lodh template: metadata: name: pod labels: bank: lodh spec: containers: - name: one image: redis ================================================ FILE: manifests/04-services/README.md ================================================   # Services chapter svc.yaml creates a simple Service that exposes port 80 of a pod that matches the selector "red: blue" ================================================ FILE: manifests/04-services/headless.yaml ================================================ apiVersion: v1 kind: Service metadata: name: myexternaldb namespace: default spec: ports: - protocol: TCP port: 3306 targetPort: 3306 --- apiVersion: v1 kind: Endpoints metadata: name: myexternaldb subsets: - addresses: ips: - 1.2.3.4 ports: - port: 3306 ================================================ FILE: manifests/04-services/svc.yaml ================================================ apiVersion: v1 kind: Service metadata: name: nginx namespace: default spec: selector: red: blue ports: - protocol: TCP port: 80 targetPort: 80 type: NodePort 
================================================ FILE: manifests/05-ingress-controller/README.md ================================================   # Ingress Controller chapter This chapter includes the creation of a simple Ingress controler attached to an nginx pod ghost.yaml is just an example of a controller creation ingress.yaml will create the full deployment wordpress.yaml and game.yaml will create each an ingress controller, servicing port 80 for services named game and ingress frontend.yaml creates also an ingress controller for port 80 of an nginx service backend.yaml is a more complex example, which will create two Replication controllers, http-backend and nginx-ingress-controller, along with the necessary services. ================================================ FILE: manifests/05-ingress-controller/backend.yaml ================================================ # https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/examples/default-backend.yaml apiVersion: v1 kind: ReplicationController metadata: name: default-http-backend spec: replicas: 1 selector: app: default-http-backend template: metadata: labels: app: default-http-backend spec: terminationGracePeriodSeconds: 60 containers: - name: default-http-backend # Any image is permissable as long as: # 1. It serves a 404 page at / # 2. 
It serves 200 on a /healthz endpoint image: gcr.io/google_containers/defaultbackend:1.0 livenessProbe: httpGet: path: /healthz port: 8080 scheme: HTTP initialDelaySeconds: 30 timeoutSeconds: 5 ports: - containerPort: 8080 resources: limits: cpu: 10m memory: 20Mi requests: cpu: 10m memory: 20Mi --- # create a service for the default backend apiVersion: v1 kind: Service metadata: labels: app: default-http-backend name: default-http-backend spec: ports: - port: 80 protocol: TCP targetPort: 8080 selector: app: default-http-backend sessionAffinity: None type: ClusterIP --- # Replication controller for the load balancer apiVersion: v1 kind: ReplicationController metadata: name: nginx-ingress-controller labels: k8s-app: nginx-ingress-lb spec: replicas: 1 selector: k8s-app: nginx-ingress-lb template: metadata: labels: k8s-app: nginx-ingress-lb name: nginx-ingress-lb spec: terminationGracePeriodSeconds: 60 containers: - image: gcr.io/google_containers/nginx-ingress-controller:0.8.2 name: nginx-ingress-lb imagePullPolicy: Always livenessProbe: httpGet: path: /healthz port: 10249 scheme: HTTP initialDelaySeconds: 30 timeoutSeconds: 5 # use downward API env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace ports: - containerPort: 80 hostPort: 80 - containerPort: 443 hostPort: 443 args: - /nginx-ingress-controller - --default-backend-service=default/default-http-backend ================================================ FILE: manifests/05-ingress-controller/frontend.yaml ================================================ apiVersion: extensions/v1beta1 kind: Ingress metadata: name: frontend spec: rules: - host: frontend.192.168.99.100.nip.io http: paths: - backend: serviceName: frontend servicePort: 80 ================================================ FILE: manifests/05-ingress-controller/game.yaml ================================================ apiVersion: extensions/v1beta1 kind: Ingress 
metadata: name: game spec: rules: - host: game.192.168.99.100.nip.io http: paths: - backend: serviceName: game servicePort: 80 ================================================ FILE: manifests/05-ingress-controller/ghost.yaml ================================================ apiVersion: extensions/v1beta1 kind: Ingress metadata: name: ghost spec: rules: - host: ghost.192.168.99.100.nip.io http: paths: - backend: serviceName: ghost servicePort: 2368 ================================================ FILE: manifests/05-ingress-controller/ingress.yaml ================================================ # ghost app apiVersion: v1 kind: Pod metadata: name: nginx labels: run: nginx spec: containers: - image: nginx name: nginx ports: - containerPort: 80 protocol: TCP --- # ghost service #1 apiVersion: v1 kind: Service metadata: labels: run: nginx name: nginx spec: ports: - port: 80 protocol: TCP targetPort: 80 selector: run: nginx --- # Create the ingress resource # https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/examples/ingress.yaml apiVersion: extensions/v1beta1 kind: Ingress metadata: name: nginx spec: rules: - host: nginx.192.168.99.100.nip.io http: paths: - backend: serviceName: nginx servicePort: 80 ================================================ FILE: manifests/05-ingress-controller/wordpress.yaml ================================================ apiVersion: extensions/v1beta1 kind: Ingress metadata: name: wordpress spec: rules: - host: wordpress.192.168.99.100.nip.io http: paths: - backend: serviceName: wordpress servicePort: 80 ================================================ FILE: manifests/06-volumes/README.md ================================================   # Volumes Controller chapter The first Volumes exercice is to create a shared volume between two pods, and experiment with emptyDir & mountPath File : volumes.yaml Second step is to work with Persistent volumes & claims : pcv.yaml Other files provided for discussion 
================================================ FILE: manifests/06-volumes/cm-vol.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: oreilly labels: app: vol spec: containers: - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /oreilly name: test imagePullPolicy: IfNotPresent name: busybox restartPolicy: Always volumes: - name: test configMap: name: foobar ================================================ FILE: manifests/06-volumes/configmap.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: cm-test labels: app: vol spec: containers: - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /velocity name: test imagePullPolicy: IfNotPresent name: busybox restartPolicy: Always volumes: - name: test configMap: name: foobar ================================================ FILE: manifests/06-volumes/foobar.md ================================================ # this is a file this is an example ================================================ FILE: manifests/06-volumes/hostpath.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: hostpath spec: containers: - image: busybox name: busybox command: - sleep - "3600" volumeMounts: - mountPath: /bitnami name: foobar volumes: - name: foobar persistentVolumeClaim: claimName: myclaim ================================================ FILE: manifests/06-volumes/mysql.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: data spec: containers: - image: mysql:5.5 name: db volumeMounts: - mountPath: /var/lib/mysql name: barfoo env: - name: MYSQL_ROOT_PASSWORD value: root volumes: - name: barfoo persistentVolumeClaim: claimName: foobar ================================================ FILE: manifests/06-volumes/oreilly/.helmignore ================================================ # Patterns to ignore when building packages. 
# This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: manifests/06-volumes/oreilly/Chart.yaml ================================================ apiVersion: v1 appVersion: "1.0" description: A Helm chart for Kubernetes name: oreilly version: 0.1.0 ================================================ FILE: manifests/06-volumes/oreilly/templates/NOTES.txt ================================================ 1. Get the application URL by running these commands: {{- if .Values.ingress.enabled }} {{- range .Values.ingress.hosts }} http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} {{- end }} {{- else if contains "NodePort" .Values.service.type }} export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "oreilly.fullname" . }}) export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT {{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get svc -w {{ template "oreilly.fullname" . }}' export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "oreilly.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') echo http://$SERVICE_IP:{{ .Values.service.port }} {{- else if contains "ClusterIP" .Values.service.type }} export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "oreilly.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") echo "Visit http://127.0.0.1:8080 to use your application" kubectl port-forward $POD_NAME 8080:80 {{- end }} ================================================ FILE: manifests/06-volumes/oreilly/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "oreilly.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "oreilly.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- $name := default .Chart.Name .Values.nameOverride -}} {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{- end -}} {{/* Create chart name and version as used by the chart label. */}} {{- define "oreilly.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} ================================================ FILE: manifests/06-volumes/oreilly/templates/deployment.yaml ================================================ apiVersion: apps/v1beta2 kind: Deployment metadata: name: {{ template "oreilly.fullname" . }} labels: app: {{ template "oreilly.name" . }} chart: {{ template "oreilly.chart" . }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: app: {{ template "oreilly.name" . }} release: {{ .Release.Name }} template: metadata: labels: app: {{ template "oreilly.name" . 
}} release: {{ .Release.Name }} spec: containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http containerPort: 80 protocol: TCP livenessProbe: httpGet: path: / port: http readinessProbe: httpGet: path: / port: http resources: {{ toYaml .Values.resources | indent 12 }} {{- with .Values.nodeSelector }} nodeSelector: {{ toYaml . | indent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{ toYaml . | indent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{ toYaml . | indent 8 }} {{- end }} ================================================ FILE: manifests/06-volumes/oreilly/templates/ingress.yaml ================================================ {{- if .Values.ingress.enabled -}} {{- $fullName := include "oreilly.fullname" . -}} {{- $servicePort := .Values.service.port -}} {{- $ingressPath := .Values.ingress.path -}} apiVersion: extensions/v1beta1 kind: Ingress metadata: name: {{ $fullName }} labels: app: {{ template "oreilly.name" . }} chart: {{ template "oreilly.chart" . }} release: {{ .Release.Name }} heritage: {{ .Release.Service }} {{- with .Values.ingress.annotations }} annotations: {{ toYaml . | indent 4 }} {{- end }} spec: {{- if .Values.ingress.tls }} tls: {{- range .Values.ingress.tls }} - hosts: {{- range .hosts }} - {{ . }} {{- end }} secretName: {{ .secretName }} {{- end }} {{- end }} rules: {{- range .Values.ingress.hosts }} - host: {{ . }} http: paths: - path: {{ $ingressPath }} backend: serviceName: {{ $fullName }} servicePort: http {{- end }} {{- end }} ================================================ FILE: manifests/06-volumes/oreilly/templates/service.yaml ================================================ apiVersion: v1 kind: Service metadata: name: {{ template "oreilly.fullname" . }} labels: app: {{ template "oreilly.name" . }} chart: {{ template "oreilly.chart" . 
}} release: {{ .Release.Name }} heritage: {{ .Release.Service }} spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} targetPort: http protocol: TCP name: http selector: app: {{ template "oreilly.name" . }} release: {{ .Release.Name }} ================================================ FILE: manifests/06-volumes/oreilly/values.yaml ================================================ # Default values for oreilly. # This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: nginx tag: stable pullPolicy: IfNotPresent service: type: ClusterIP port: 80 ingress: enabled: false annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" path: / hosts: - chart-example.local tls: [] # - secretName: chart-example-tls # hosts: # - chart-example.local resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
# limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} ================================================ FILE: manifests/06-volumes/pv.yaml ================================================ apiVersion: v1 kind: PersistentVolume metadata: name: pvfoo spec: capacity: storage: 1Gi accessModes: - ReadWriteOnce hostPath: path: "/tmp/foo0001" ================================================ FILE: manifests/06-volumes/pvc.yaml ================================================ kind: PersistentVolumeClaim apiVersion: v1 metadata: name: foobar spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi ================================================ FILE: manifests/06-volumes/volumes.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: vol labels: app: vol spec: containers: - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /busy name: test imagePullPolicy: IfNotPresent name: busy - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /box name: test imagePullPolicy: IfNotPresent name: box restartPolicy: Always volumes: - name: test emptyDir: {} ================================================ FILE: manifests/07-crd/README.md ================================================   # Custom Resources Definition chapter Several examples are provided. 
database.yaml is the one from the syllabus, but as you may create any type of CRD, the other manifests are here to broaden your mind :-) ================================================ FILE: manifests/07-crd/bd.yml ================================================ apiVersion: foo.bar/v1 kind: DataBase metadata: name: crazy data: oracle: mysql ================================================ FILE: manifests/07-crd/database.yml ================================================ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: databases.foo.bar spec: group: foo.bar version: v1 scope: Namespaced names: plural: databases singular: database kind: DataBase shortNames: - db ================================================ FILE: manifests/07-crd/db.yml ================================================ apiVersion: foo.bar/v1 kind: DataBase metadata: name: my-new-db spec: type: mysql ================================================ FILE: manifests/08-security/README.md ================================================   # Security and RBAC chapter This example states how you can restrict what a container is allowed to do within a k8s cluster : test.yaml ================================================ FILE: manifests/08-security/nginx.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: nginxsec spec: containers: - image: nginx name: nginx securityContext: runAsNonRoot: true ================================================ FILE: manifests/08-security/test.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: redis spec: containers: - image: bitnami/redis imagePullPolicy: IfNotPresent env: - name: ALLOW_EMPTY_PASSWORD value: "yes" name: redis securityContext: runAsNonRoot: true restartPolicy: Always ================================================ FILE: manifests/README.md ================================================   # Manifests This dir hosts all the K8s 
manifests used during the training. Some of the chapter are well defined and numbered. The rest are for specific examples and demonstration, and may not relate to a specific chapter in the training. ================================================ FILE: manifests/canary/README.md ================================================ # Canary Demo This demonstrates using 2 deployments to test new code alongside operational code. Each deployment can be scaled to change the proportion of data sent to each. The static files are placed in sub-directories such that configMap can give them the correct filenames by default. Mounting these is demonstrated in `configmap.sh`. ``` ./configmap.sh kubectl create -f blue-deploy.yaml kubectl create -f red-deploy.yaml kubectl create -f redblue-svc.yaml kubectl create -f redblue-ingress.yaml ``` Running this demo includes the following steps: * Create the configMaps. * Create the redblue service. * Create the red deployment which will be captured by the redblue service. * Scale the red deployment (if desired). * Create the blue deployment which will also be captured by the redblue service due to its label, and will be roundrobinned alongside the red deployment. * Scale the blue deployment (if desired). * Scale down the red deployment (to 0) and delete it. * Continue with the new deployment. N.B: This approach was taken as editing the bluered deployment to update labels left a floating rs and pods. 
This approach managed the pods more 'nicely' ================================================ FILE: manifests/canary/blue-deploy.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: labels: run: test name: blue spec: replicas: 1 selector: matchLabels: run: test template: metadata: labels: run: test spec: containers: - image: nginx name: nginx volumeMounts: - mountPath: /usr/share/nginx/html name: blue volumes: - name: blue configMap: name: blue ================================================ FILE: manifests/canary/blue-files/index.html ================================================
================================================ FILE: manifests/canary/configmap.sh ================================================ #!/bin/sh kubectl create configmap red --from-file=red-files/index.html kubectl create configmap blue --from-file=blue-files/index.html ================================================ FILE: manifests/canary/red-deploy.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: labels: run: test name: red spec: replicas: 2 selector: matchLabels: run: test template: metadata: labels: run: test spec: containers: - image: nginx name: nginx volumeMounts: - mountPath: /usr/share/nginx/html name: red volumes: - name: red configMap: name: red ================================================ FILE: manifests/canary/red-files/index.html ================================================ ================================================ FILE: manifests/canary/redblue-ingress.yaml ================================================ apiVersion: extensions/v1beta1 kind: Ingress metadata: name: redblue spec: rules: - host: redblue.192.168.99.100.nip.io http: paths: - backend: serviceName: redblue servicePort: 80 ================================================ FILE: manifests/canary/redblue-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: name: redblue spec: selector: run: test ports: - port: 80 protocol: TCP ================================================ FILE: manifests/configmaps/README.md ================================================ #### A small demo of how a configmap gets updated inside a running pod. 
Steps: 1- Create the configmap: kubectl create -f configmap.yaml 2- Create the pod kubectl create -f pod.yaml 3- Get the logs from the pod: kubectl logs -f busybox 4- on another terminal run the update-configmap.sh script ./update-configmap.sh 5- check the logs to see how the confimap data changes inside the running pod ================================================ FILE: manifests/configmaps/configmap.yaml ================================================ apiVersion: v1 data: config.yaml: | version: 4 host: www.example.com ports: - 80 - 9090 kind: ConfigMap metadata: name: config-file ================================================ FILE: manifests/configmaps/foobar.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: busybox namespace: default spec: containers: - image: busybox command: - sleep - "3600" name: busybox volumeMounts: - name: test mountPath: /tmp/test volumes: - name: test configMap: name: foobar ================================================ FILE: manifests/configmaps/pod.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: busybox namespace: default spec: containers: - image: busybox command: - watch - "cat /etc/config/config.yaml" imagePullPolicy: IfNotPresent name: busybox volumeMounts: - name: config-volume mountPath: /etc/config volumes: - name: config-volume configMap: name: config-file ================================================ FILE: manifests/configmaps/update-configmap.sh ================================================ #!/bin/bash sed -i 's/\-\s9090/- 8888/' configmap.yaml kubectl apply -f configmap.yaml ================================================ FILE: manifests/init-container/init.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: init-demo2 labels: topic: initdemo2 spec: containers: - name: python image: python:2.7-alpine workingDir: /tmp/init command: - python - "-m" - SimpleHTTPServer 
volumeMounts: - name: path mountPath: /tmp/init initContainers: - name: busybox image: busybox command: - wget - "-O" - "/tmp/init/index.html" - http://google.com volumeMounts: - name: path mountPath: /tmp/init volumes: - name: path emptyDir: {} ================================================ FILE: manifests/logging/allinone.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: logging --- apiVersion: v1 kind: ServiceAccount metadata: name: efk namespace: logging --- kind: PersistentVolume apiVersion: v1 metadata: name: prom001 labels: type: local namespace: logging spec: capacity: storage: 1Gi accessModes: - ReadWriteOnce persistentVolumeReclaimPolicy: Recycle hostPath: path: "/mnt/sda1/data/data00" --- kind: PersistentVolumeClaim apiVersion: v1 metadata: name: promclaim1 namespace: logging spec: accessModes: - ReadWriteOnce resources: requests: storage: 800M --- apiVersion: v1 kind: Service metadata: annotations: prometheus.io/scrape: 'true' labels: component: efk name: prometheus name: prometheus namespace: logging spec: selector: component: efk app: prometheus type: NodePort ports: - name: prometheus protocol: TCP port: 9090 targetPort: 9090 --- apiVersion: v1 kind: ConfigMap metadata: name: prometheus namespace: logging data: prometheus.yml: |- # A scrape configuration for running Prometheus on a Kubernetes cluster. # This uses separate scrape configs for cluster components (i.e. API server, node) # and services to allow each to use different authentication configs. # # Kubernetes labels will be added as Prometheus labels on metrics via the # `labelmap` relabeling action. # Scrape config for cluster components. 
scrape_configs: - job_name: 'kubernetes-cluster' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt insecure_skip_verify: true bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: apiserver - job_name: 'kubernetes-nodes' scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt insecure_skip_verify: true bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - job_name: 'kubernetes-service-endpoints' kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: endpoint relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] action: replace target_label: __scheme__ regex: (https?) 
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] action: replace target_label: __address__ regex: (.+)(?::\d+);(\d+) replacement: $1:$2 - action: labelmap regex: __meta_kubernetes_endpoint_label_(.+) - source_labels: [__meta_kubernetes_service_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] action: replace target_label: kubernetes_name - job_name: 'kubernetes-service-probes' metrics_path: /probe params: module: [http_2xx] kubernetes_sd_configs: - api_servers: - 'https://kubernetes.default.svc' in_cluster: true role: service relabel_configs: - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] action: keep regex: true - source_labels: [__address__] regex: (.*)(:80)? target_label: __param_target replacement: ${1} - source_labels: [__param_target] regex: (.*) target_label: instance replacement: ${1} - source_labels: [] regex: .* target_label: __address__ replacement: blackbox:9115 # Blackbox exporter. 
- action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: [__meta_kubernetes_service_namespace] target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_service_name] target_label: kubernetes_name --- apiVersion: extensions/v1beta1 kind: Deployment metadata: name: prometheus namespace: logging labels: component: efk app: prometheus spec: replicas: 1 selector: matchLabels: component: efk app: prometheus template: metadata: name: prometheus labels: component: efk app: prometheus spec: serviceAccount: efk containers: - name: prometheus image: prom/prometheus:latest args: - '-config.file=/etc/prometheus/prometheus.yml' ports: - name: web containerPort: 9090 livenessProbe: httpGet: path: /metrics port: 9090 initialDelaySeconds: 15 timeoutSeconds: 1 volumeMounts: - name: config-volume mountPath: /etc/prometheus - name: prompd mountPath: "/prometheus/data" volumes: - name: config-volume configMap: name: prometheus - name: prompd persistentVolumeClaim: claimName: promclaim1 --- apiVersion: v1 kind: Service metadata: labels: component: efk name: grafana name: grafana namespace: logging spec: selector: component: efk app: grafana type: NodePort ports: - name: grafana protocol: TCP port: 3000 targetPort: 3000 --- apiVersion: extensions/v1beta1 kind: Deployment metadata: name: grafana namespace: logging labels: component: efk app: grafana spec: replicas: 1 selector: matchLabels: component: efk app: grafana template: metadata: name: grafana labels: component: efk app: grafana spec: serviceAccount: efk containers: - name: grafana image: grafana/grafana ports: - name: web containerPort: 3000 volumeMounts: - name: config-volume mountPath: /var/lib/grafana/dashboards - name: ini-volume mountPath: /etc/grafana volumes: - name: config-volume configMap: name: grafana - name: ini-volume configMap: name: ini ================================================ FILE: manifests/logging/configs.yaml ================================================ 
apiVersion: v1 kind: ConfigMap metadata: name: ini namespace: logging data: grafana.ini: |- ##################### Grafana Configuration Example ##################### # # Everything has defaults so you only need to uncomment things you want to # change # possible values : production, development ; app_mode = production ================================================ FILE: manifests/logging/dashboards.json ================================================ apiVersion: v1 kind: ConfigMap metadata: name: grafana namespace: logging data: dashboards.json: |- ================================================ FILE: manifests/logging/dashboards.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: grafana namespace: logging data: dashboards.json: |- { "__inputs": [ { "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "3.1.1" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" } ], "id": null, "title": "Kubernetes", "tags": [], "style": "dark", "timezone": "browser", "editable": true, "hideControls": false, "sharedCrosshair": false, "rows": [ { "title": "New row", "height": "250px", "editable": true, "collapse": false, "panels": [ { "title": "Number of Pods", "error": false, "span": 12, "editable": true, "type": "singlestat", "isNew": true, "id": 2, "targets": [ { "refId": "A", "expr": "count(count(container_start_time_seconds{io_kubernetes_pod_name!=\"\"}) by (io_kubernetes_pod_name))", "intervalFactor": 2, "step": 240 } ], "links": [], "datasource": "${DS_PROMETHEUS}", "maxDataPoints": 100, "interval": null, "cacheTimeout": null, "format": "none", "prefix": "", "postfix": 
"", "nullText": null, "valueMaps": [ { "value": "null", "op": "=", "text": "N/A" } ], "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "rangeMaps": [ { "from": "null", "to": "null", "text": "N/A" } ], "mappingType": 1, "nullPointMode": "connected", "valueName": "avg", "prefixFontSize": "50%", "valueFontSize": "80%", "postfixFontSize": "50%", "thresholds": "20,25", "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "sparkline": { "show": false, "full": false, "lineColor": "rgb(31, 120, 193)", "fillColor": "rgba(31, 118, 189, 0.18)" }, "gauge": { "show": true, "minValue": 0, "maxValue": 30, "thresholdMarkers": true, "thresholdLabels": false } } ] }, { "collapse": false, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 1, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "count(count(container_start_time_seconds{io_kubernetes_pod_name!=\"\"}) by (io_kubernetes_pod_name))", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 20 } ], "timeFrom": null, "timeShift": null, "title": "Pods", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { 
"format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ] } ], "title": "Row" } ], "time": { "from": "now-3h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "templating": { "list": [] }, "annotations": { "list": [] }, "refresh": "5s", "schemaVersion": 12, "version": 5, "links": [], "gnetId": null } ================================================ FILE: manifests/logging/grafana.ini ================================================ apiVersion: v1 kind: ConfigMap metadata: name: ini namespace: logging data: grafana.ini: |- ##################### Grafana Configuration Example ##################### # # Everything has defaults so you only need to uncomment things you want to # change # possible values : production, development ; app_mode = production # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty ; instance_name = ${HOSTNAME} #################################### Paths #################################### [paths] # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) # ;data = /var/lib/grafana # # Directory where grafana can store logs # ;logs = /var/log/grafana # # Directory where grafana will automatically scan and look for plugins # ;plugins = /var/lib/grafana/plugins # #################################### Server ####################################[server] # Protocol (http or https) ;protocol = http # The ip address to bind to, empty will bind to all interfaces ;http_addr = # The http port to use ;http_port = 3000 # The public facing domain name used to access grafana from a browser ;domain = localhost # Redirect to correct domain if host header does not match domain # Prevents DNS rebinding attacks ;enforce_domain = false # The full public facing url ;root_url = 
%(protocol)s://%(domain)s:%(http_port)s/ # Log web requests ;router_logging = false # the path relative working path ;static_root_path = public # enable gzip ;enable_gzip = false # https certs & key file ;cert_file = ;cert_key = #################################### Database #################################### [database] # Either "mysql", "postgres" or "sqlite3", it's your choice ;type = sqlite3 ;host = 127.0.0.1:3306 ;name = grafana ;user = root ;password = # For "postgres" only, either "disable", "require" or "verify-full" ;ssl_mode = disable # For "sqlite3" only, path relative to data_path setting ;path = grafana.db #################################### Session #################################### [session] # Either "memory", "file", "redis", "mysql", "postgres", default is "file" ;provider = file # Provider config options # memory: not have any config yet # file: session dir path, is relative to grafana data_path # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` # mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name` # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable ;provider_config = sessions # Session cookie name ;cookie_name = grafana_sess # If you use session in https only, default is false ;cookie_secure = false # Session life time, default is 86400 ;session_life_time = 86400 #################################### Analytics #################################### [analytics] # Server reporting, sends usage counters to stats.grafana.org every 24 hours. # No ip addresses are being tracked, only simple counters to track # running instances, dashboard and error counts. It is very helpful to us. # Change this option to false to disable reporting. 
;reporting_enabled = true # Set to false to disable all checks to https://grafana.net # for new vesions (grafana itself and plugins), check is used # in some UI views to notify that grafana or plugin update exists # This option does not cause any auto updates, nor send any information # only a GET request to http://grafana.net to get latest versions check_for_updates = true # Google Analytics universal tracking code, only enabled if you specify an id here ;google_analytics_ua_id = #################################### Security #################################### [security] # default admin user, created on startup ;admin_user = admin # default admin password, can be changed before first start of grafana, or in profile settings ;admin_password = admin # used for signing ;secret_key = SW2YcwTIb9zpOOhoPsMm # Auto-login remember days ;login_remember_days = 7 ;cookie_username = grafana_user ;cookie_remember_name = grafana_remember # disable gravatar profile images ;disable_gravatar = false # data source proxy whitelist (ip_or_domain:port separated by spaces) ;data_source_proxy_whitelist = [snapshots] # snapshot sharing options ;external_enabled = true ;external_snapshot_url = https://snapshots-origin.raintank.io ;external_snapshot_name = Publish to snapshot.raintank.io #################################### Users #################################### [users] # disable user signup / registration ;allow_sign_up = true # Allow non admin users to create organizations ;allow_org_create = true # Set to true to automatically assign new users to the default organization (id 1) ;auto_assign_org = true # Default role new users will be automatically assigned (if disabled above is set to true) ;auto_assign_org_role = Viewer # Background text for the user field on the login page ;login_hint = email or username # Default UI theme ("dark" or "light") ;default_theme = dark #################################### Anonymous Auth ########################## [auth.anonymous] # enable anonymous 
access ;enabled = false # specify organization name that should be used for unauthenticated users ;org_name = Main Org. # specify role for unauthenticated users ;org_role = Viewer #################################### Github Auth ########################## [auth.github] ;enabled = false ;allow_sign_up = false ;client_id = some_id ;client_secret = some_secret ;scopes = user:email,read:org ;auth_url = https://github.com/login/oauth/authorize ;token_url = https://github.com/login/oauth/access_token ;api_url = https://api.github.com/user ;team_ids = ;allowed_organizations = #################################### Google Auth ########################## [auth.google] ;enabled = false ;allow_sign_up = false ;client_id = some_client_id ;client_secret = some_client_secret ;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email ;auth_url = https://accounts.google.com/o/oauth2/auth ;token_url = https://accounts.google.com/o/oauth2/token ;api_url = https://www.googleapis.com/oauth2/v1/userinfo ;allowed_domains = #################################### Auth Proxy ########################## [auth.proxy] ;enabled = false ;header_name = X-WEBAUTH-USER ;header_property = username ;auto_sign_up = true #################################### Basic Auth ########################## [auth.basic] ;enabled = true #################################### Auth LDAP ########################## [auth.ldap] ;enabled = false ;config_file = /etc/grafana/ldap.toml #################################### SMTP / Emailing ########################## [smtp] ;enabled = false ;host = localhost:25 ;user = ;password = ;cert_file = ;key_file = ;skip_verify = false ;from_address = admin@grafana.localhost [emails] ;welcome_email_on_sign_up = false #################################### Logging ########################## [log] # Either "console", "file", "syslog". Default is console and file # Use space to separate multiple modes, e.g. 
"console file" ;mode = console, file # Either "trace", "debug", "info", "warn", "error", "critical", default is "info" ;level = info # For "console" mode only [log.console] ;level = # log line format, valid options are text, console and json ;format = console # For "file" mode only [log.file] ;level = # log line format, valid options are text, console and json ;format = text # This enables automated log rotate(switch of following options), default is true ;log_rotate = true # Max line number of single file, default is 1000000 ;max_lines = 1000000 # Max size shift of single file, default is 28 means 1 << 28, 256MB ;max_size_shift = 28 # Segment log daily, default is true ;daily_rotate = true # Expired days of log file(delete after max days), default is 7 ;max_days = 7 [log.syslog] ;level = # log line format, valid options are text, console and json ;format = text # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. ;network = ;address = # Syslog facility. user, daemon and local0 through local7 are valid. ;facility = # Syslog tag. By default, the process' argv[0] is used. ;tag = #################################### AMQP Event Publisher ########################## [event_publisher] ;enabled = false ;rabbitmq_url = amqp://localhost/ ;exchange = grafana_events ;#################################### Dashboard JSON files ########################## [dashboards.json] ;enabled = true ;path = /var/lib/grafana/dashboards #################################### Internal Grafana Metrics ########################## # Metrics available at HTTP API Url /api/metrics [metrics] # Disable / Enable internal metrics ;enabled = true # Publish interval ;interval_seconds = 10 # Send internal metrics to Graphite ; [metrics.graphite] ; address = localhost:2003 ; prefix = prod.grafana.%(instance_name)s. 
#################################### Internal Grafana Metrics ########################## # Url used to to import dashboards directly from Grafana.net [grafana_net] url = https://grafana.net ================================================ FILE: manifests/logging/grafana.json ================================================ { "__inputs": [ { "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "3.1.1" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" } ], "id": null, "title": "Kubernetes", "tags": [], "style": "dark", "timezone": "browser", "editable": true, "hideControls": false, "sharedCrosshair": false, "rows": [ { "collapse": false, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 1, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "count(count(container_start_time_seconds{io_kubernetes_pod_name!=\"\"}) by (io_kubernetes_pod_name))", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 20 } ], "timeFrom": null, "timeShift": null, "title": "Pods", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, 
"yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ] } ], "title": "Row" } ], "time": { "from": "now-3h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "templating": { "list": [] }, "annotations": { "list": [] }, "refresh": "5s", "schemaVersion": 12, "version": 3, "links": [], "gnetId": null } ================================================ FILE: manifests/logging/grafana2.json ================================================ { "__inputs": [ { "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "3.1.1" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" } ], "id": null, "title": "Kubernetes", "tags": [], "style": "dark", "timezone": "browser", "editable": true, "hideControls": false, "sharedCrosshair": false, "rows": [ { "title": "New row", "height": "250px", "editable": true, "collapse": false, "panels": [ { "title": "Number of Pods", "error": false, "span": 12, "editable": true, "type": "singlestat", "isNew": true, "id": 2, "targets": [ { "refId": "A", "expr": "count(count(container_start_time_seconds{io_kubernetes_pod_name!=\"\"}) by (io_kubernetes_pod_name))", "intervalFactor": 2, "step": 240 } ], "links": [], "datasource": "${DS_PROMETHEUS}", "maxDataPoints": 100, "interval": null, "cacheTimeout": null, "format": "none", "prefix": "", "postfix": "", "nullText": null, "valueMaps": [ { "value": "null", "op": "=", 
"text": "N/A" } ], "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "rangeMaps": [ { "from": "null", "to": "null", "text": "N/A" } ], "mappingType": 1, "nullPointMode": "connected", "valueName": "avg", "prefixFontSize": "50%", "valueFontSize": "80%", "postfixFontSize": "50%", "thresholds": "20,25", "colorBackground": false, "colorValue": false, "colors": [ "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], "sparkline": { "show": false, "full": false, "lineColor": "rgb(31, 120, 193)", "fillColor": "rgba(31, 118, 189, 0.18)" }, "gauge": { "show": true, "minValue": 0, "maxValue": 30, "thresholdMarkers": true, "thresholdLabels": false } } ] }, { "collapse": false, "editable": true, "height": "250px", "panels": [ { "aliasColors": {}, "bars": false, "datasource": "${DS_PROMETHEUS}", "editable": true, "error": false, "fill": 1, "grid": { "threshold1": null, "threshold1Color": "rgba(216, 200, 27, 0.27)", "threshold2": null, "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "id": 1, "isNew": true, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 2, "links": [], "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "span": 12, "stack": false, "steppedLine": false, "targets": [ { "expr": "count(count(container_start_time_seconds{io_kubernetes_pod_name!=\"\"}) by (io_kubernetes_pod_name))", "intervalFactor": 2, "legendFormat": "", "refId": "A", "step": 20 } ], "timeFrom": null, "timeShift": null, "title": "Pods", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "show": true }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, 
"show": true } ] } ], "title": "Row" } ], "time": { "from": "now-3h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "templating": { "list": [] }, "annotations": { "list": [] }, "refresh": "5s", "schemaVersion": 12, "version": 5, "links": [], "gnetId": null } ================================================ FILE: manifests/nodeselector/pod-to-arch-amd64.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: busybox-to-amd64 namespace: default spec: containers: - image: busybox command: - sleep - "3600" imagePullPolicy: IfNotPresent name: busybox nodeSelector: beta.kubernetes.io/arch: amd64 ================================================ FILE: manifests/old/1605207/configmap.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: bleiman labels: app: vol spec: containers: - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /kubecon name: test imagePullPolicy: IfNotPresent name: busybox restartPolicy: Always volumes: - name: test configMap: name: foobar ================================================ FILE: manifests/old/1605207/foobar.yml ================================================ apiVersion: v1 kind: Pod metadata: name: foobar namespace: oreilly spec: containers: - image: nginx name: nginx ================================================ FILE: manifests/old/1605207/game-svc.yml ================================================ apiVersion: v1 kind: Service metadata: name: game namespace: default spec: selector: app: game ports: - protocol: TCP port: 80 targetPort: 80 type: NodePort ================================================ FILE: manifests/old/1605207/game.yml ================================================ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: game namespace: default spec: 
replicas: 2 template: metadata: name: game namespace: default labels: app: game spec: containers: - image: runseb/2048 name: game ================================================ FILE: manifests/old/1605207/hostpath.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: hostpath spec: containers: - image: busybox name: busybox command: - sleep - "3600" volumeMounts: - mountPath: /oreilly name: hostpath volumes: - name: hostpath hostPath: path: /data ================================================ FILE: manifests/old/1605207/mysql.yml ================================================ kind: Pod apiVersion: v1 metadata: name: mysql-pvc spec: volumes: - name: data persistentVolumeClaim: claimName: myclaim containers: - name: mysql-pvc image: "mysql:5.5" env: - name: MYSQL_ROOT_PASSWORD value: root volumeMounts: - mountPath: "/var/lib/mysql" name: data ================================================ FILE: manifests/old/1605207/nb.yml ================================================ apiVersion: cool.io/v1 kind: NoteBook metadata: name: crazy labels: kubernetes: rocks ================================================ FILE: manifests/old/1605207/notebooks.yml ================================================ apiVersion: extensions/v1beta1 kind: ThirdPartyResource metadata: name: note-book.cool.io description: "A notebook" versions: - name: v1 ================================================ FILE: manifests/old/1605207/pvc.yaml ================================================ kind: PersistentVolumeClaim apiVersion: v1 metadata: name: myclaim spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi ================================================ FILE: manifests/old/1605207/volumes.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: vol labels: app: vol spec: containers: - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /busy name: test imagePullPolicy: 
IfNotPresent name: busy - image: busybox command: - sleep - "3600" volumeMounts: - mountPath: /box name: test imagePullPolicy: IfNotPresent name: box restartPolicy: Always volumes: - name: test emptyDir: {} ================================================ FILE: manifests/scheduling/README.md ================================================ ``` curl -H "Content-Type:application/json" -X POST --data @binding.json http://localhost:8080/api/v1/namespaces/default/pods/foobar-sched/binding/ ``` ================================================ FILE: manifests/scheduling/binding.json ================================================ { "apiVersion": "v1", "kind": "Binding", "metadata": { "name": "foobar" }, "target": { "apiVersion": "v1", "kind": "Node", "name": "minikube" } } ================================================ FILE: manifests/scheduling/foobar.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: toto spec: schedulerName: foobar containers: - name: redis image: redis ================================================ FILE: manifests/scheduling/redis-sched.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: foobar-sched spec: schedulerName: foobar containers: - name: redis image: redis ================================================ FILE: manifests/scheduling/redis-selector.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: foobar-node spec: containers: - name: redis image: redis nodeSelector: foo: bar ================================================ FILE: manifests/scheduling/redis.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: foobar spec: containers: - name: redis image: redis ================================================ FILE: manifests/scheduling/scheduler.py ================================================ #!/usr/bin/env python import time import random import json from kubernetes 
import client, config, watch config.load_kube_config() v1=client.CoreV1Api() scheduler_name = "foobar" def nodes_available(): ready_nodes = [] for n in v1.list_node().items: for status in n.status.conditions: if status.status == "True" and status.type == "Ready": ready_nodes.append(n.metadata.name) return ready_nodes def scheduler(name, node, namespace="default"): body=client.V1Binding() target=client.V1ObjectReference() target.kind="Node" target.apiVersion="v1" target.name= node meta=client.V1ObjectMeta() meta.name=name body.target=target body.metadata=meta return v1.create_namespaced_binding(namespace, body) def main(): w = watch.Watch() for event in w.stream(v1.list_namespaced_pod, "default"): if event['object'].status.phase == "Pending" and event['object'].spec.scheduler_name == scheduler_name: try: res = scheduler(event['object'].metadata.name, random.choice(nodes_available())) except client.rest.ApiException as e: print json.loads(e.body)['message'] if __name__ == '__main__': main() ================================================ FILE: manifests/security/openssl-generate-certs.sh ================================================ #!/usr/bin/env bash minicube_dir=.minikube client_cert_dir=k8s_client_crts client_username=employee if openssl_bin=$(which openssl) ; then # Test minicube certs for i in crt key ; do if ! 
[ -f $HOME/$minicube_dir/ca.$i ] ; then echo "Unable to find ca.$i" exit 1 else echo "OK: Found ca.$i" fi done # Create cert directory if mkdir -p $HOME/$client_cert_dir ; then set -e # Generate certs $openssl_bin genrsa -out $HOME/$client_cert_dir/$client_username.key 2048 $openssl_bin req -new -key $HOME/$client_cert_dir/$client_username.key -out $HOME/$client_cert_dir/$client_username.csr -subj "/CN=$client_username/O=bitnami" $openssl_bin x509 -req -in $HOME/$client_cert_dir/$client_username.csr -CA $HOME/$minicube_dir/ca.crt -CAkey $HOME/$minicube_dir/ca.key -CAcreateserial -out $HOME/$client_cert_dir/$client_username.crt -days 500 echo -e "\nCreated in $HOME/$client_cert_dir" ls -1 $HOME/$client_cert_dir/* exit 0 else echo "Unable to create $HOME/$client_cert_dir" exit 1 fi else echo "Unable to find openssl binary in PATH" exit 1 fi #!/usr/bin/env bash if openssl_bin=$(which openssl) ; then if [ -z $SUDO_USER ] ; then username=$USER else username=$SUDO else echo "Sorry, unable to find openssl binary in path" mkdir -p $HOME/k8s_client_certificates else echo "Sorry, unable to find openssl binary in path" fi openssl genrsa -out employee.key 2048 openssl req -new -key employee.key -out employee.csr -subj openssl req -new -key employee.key -out employee.csr -sub "/CN=employee/O=bitnami" openssl req -new -key employee.key -out employee.csr -subj "/CN=employee/O=bitnami" openssl x509 -req -in employee.csr -CA CA_LOCATION/ca.crt -CAkey CA_LOCATION/ca.key -CAcreateserial -out employee.crt -days 500 openssl x509 -req -in employee.csr -CA /home/wire/.minikube/ca.crt -CAkey /home/wire/.minikube/ca.key -CAcreateserial -out employee.crt -days 500 601 openssl rsa -check -in rv-osiris.key 602 openssl rsa -check -in rv-osiris.crt 1967 openssl genrsa -out employee.key 2048 1968 openssl req -new -key employee.key -out employee.csr -subj 1969 openssl req -new -key employee.key -out employee.csr -sub "/CN=employee/O=bitnami" 1970 openssl req -new -key employee.key -out 
employee.csr -subj "/CN=employee/O=bitnami" 1971 openssl x509 -req -in employee.csr -CA CA_LOCATION/ca.crt -CAkey CA_LOCATION/ca.key -CAcreateserial -out employee.crt -days 500 1972 openssl x509 -req -in employee.csr -CA /home/wire/.minikube/ca.crt -CAkey /home/wire/.minikube/ca.key -CAcreateserial -out employee.crt -days 500 1998 history | grep openssl 1999 history | grep openssl > napsat-script 2013 mv napsat-script openssl-generate-certs.sh 2014 vim openssl-generate-certs.sh 2015 test openssl 2017 test openssl1 2019 vim openssl-generate-certs.sh 2021 vim openssl-generate-certs.sh 2023 history | grep openssl 2024 history | grep openssl >> openssl-generate-certs.sh ================================================ FILE: manifests/security/pawn.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: pawn spec: containers: - image: busybox command: - sleep - "3600" name: pawn securityContext: privileged: true hostNetwork: true hostPID: true restartPolicy: Always ================================================ FILE: manifests/wordpress/march13/mysql-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: run: mysql name: mysql namespace: oreilly selfLink: /api/v1/namespaces/default/services/mysql spec: ports: - port: 3306 protocol: TCP targetPort: 3306 selector: run: mysql sessionAffinity: None type: ClusterIP status: loadBalancer: {} ================================================ FILE: manifests/wordpress/march13/mysql.yaml ================================================ apiVersion: extensions/v1beta1 kind: Deployment metadata: annotations: deployment.kubernetes.io/revision: "1" creationTimestamp: null generation: 1 labels: run: mysql name: mysql namespace: oreilly selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/mysql spec: replicas: 1 selector: matchLabels: run: mysql strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 1 type: 
RollingUpdate template: metadata: creationTimestamp: null labels: run: mysql spec: containers: - env: - name: MYSQL_ROOT_PASSWORD value: root image: mysql:5.5 imagePullPolicy: IfNotPresent name: mysql resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 status: {} ================================================ FILE: manifests/wordpress/march13/quota.yaml ================================================ apiVersion: v1 kind: ResourceQuota metadata: creationTimestamp: null name: wordpress namespace: oreilly selfLink: /api/v1/namespaces/oreilly/resourcequotas/wordpress spec: hard: pods: "2" status: {} ================================================ FILE: manifests/wordpress/march13/wordpress/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: manifests/wordpress/march13/wordpress/Chart.yaml ================================================ apiVersion: v1 appVersion: "1.0" description: A wordpress chart for fun name: wordpress version: 0.9.0 ================================================ FILE: manifests/wordpress/march13/wordpress/templates/mysql-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: run: mysql name: mysql namespace: oreilly selfLink: /api/v1/namespaces/default/services/mysql spec: ports: - port: 3306 protocol: TCP targetPort: 3306 selector: run: mysql sessionAffinity: None type: ClusterIP status: loadBalancer: {} ================================================ FILE: manifests/wordpress/march13/wordpress/templates/mysql.yaml ================================================ apiVersion: extensions/v1beta1 kind: Deployment metadata: annotations: deployment.kubernetes.io/revision: "1" creationTimestamp: null generation: 1 labels: run: mysql name: mysql namespace: oreilly selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/mysql spec: replicas: 1 selector: matchLabels: run: mysql strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 1 type: RollingUpdate template: metadata: creationTimestamp: null labels: run: mysql spec: containers: - env: - name: MYSQL_ROOT_PASSWORD value: root image: mysql:5.5 imagePullPolicy: IfNotPresent name: mysql resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 status: {} ================================================ FILE: manifests/wordpress/march13/wordpress/templates/quota.yaml 
================================================ apiVersion: v1 kind: ResourceQuota metadata: creationTimestamp: null name: wordpress namespace: oreilly selfLink: /api/v1/namespaces/oreilly/resourcequotas/wordpress spec: hard: pods: "2" status: {} ================================================ FILE: manifests/wordpress/march13/wordpress/templates/wordpress-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: run: wordpress name: wordpress namespace: oreilly selfLink: /api/v1/namespaces/default/services/wordpress spec: externalTrafficPolicy: Cluster ports: - port: 80 protocol: TCP targetPort: 80 selector: run: wordpress sessionAffinity: None type: NodePort status: loadBalancer: {} ================================================ FILE: manifests/wordpress/march13/wordpress/templates/wordpress.yaml ================================================ apiVersion: extensions/v1beta1 kind: Deployment metadata: annotations: deployment.kubernetes.io/revision: "1" creationTimestamp: null generation: 1 labels: run: wordpress name: wordpress namespace: oreilly selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/wordpress spec: replicas: 1 selector: matchLabels: run: wordpress strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 1 type: RollingUpdate template: metadata: creationTimestamp: null labels: run: wordpress spec: containers: - env: - name: WORDPRESS_DB_HOST value: mysql - name: WORDPRESS_DB_PASSWORD value: root image: wordpress imagePullPolicy: Always name: wordpress resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 status: {} ================================================ FILE: manifests/wordpress/march13/wordpress/values.yaml ================================================ # this is a wordpress chart 
================================================ FILE: manifests/wordpress/march13/wordpress-ns.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: oreilly ================================================ FILE: manifests/wordpress/march13/wordpress-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: creationTimestamp: null labels: run: wordpress name: wordpress namespace: oreilly selfLink: /api/v1/namespaces/default/services/wordpress spec: externalTrafficPolicy: Cluster ports: - port: 80 protocol: TCP targetPort: 80 selector: run: wordpress sessionAffinity: None type: NodePort status: loadBalancer: {} ================================================ FILE: manifests/wordpress/march13/wordpress.yaml ================================================ apiVersion: extensions/v1beta1 kind: Deployment metadata: annotations: deployment.kubernetes.io/revision: "1" creationTimestamp: null generation: 1 labels: run: wordpress name: wordpress namespace: oreilly selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/wordpress spec: replicas: 1 selector: matchLabels: run: wordpress strategy: rollingUpdate: maxSurge: 1 maxUnavailable: 1 type: RollingUpdate template: metadata: creationTimestamp: null labels: run: wordpress spec: containers: - env: - name: WORDPRESS_DB_HOST value: mysql - name: WORDPRESS_DB_PASSWORD value: root image: wordpress imagePullPolicy: Always name: wordpress resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 status: {} ================================================ FILE: manifests/wordpress/mysql-secret.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: mysql-secret spec: containers: - image: mysql:5.5 env: - name: MYSQL_ROOT_PASSWORD 
valueFrom: secretKeyRef: name: foobar key: password imagePullPolicy: IfNotPresent name: mysql restartPolicy: Always ================================================ FILE: manifests/wordpress/mysql.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: mysql spec: containers: - image: mysql:5.5 env: - name: MYSQL_ROOT_PASSWORD value: root imagePullPolicy: IfNotPresent name: mysql restartPolicy: Always ================================================ FILE: manifests/wordpress/secret.json ================================================ { "kind": "Secret", "apiVersion": "v1", "metadata": { "name": "mysql", "creationTimestamp": null }, "data": { "password": "cm9vdA==" } } ================================================ FILE: manifests/wordpress/wordpress/mysql-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: labels: app: mysql name: mysql namespace: wordpress spec: ports: - port: 3306 type: ClusterIP selector: app: mysql ================================================ FILE: manifests/wordpress/wordpress/mysql.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: mysql namespace: wordpress spec: replicas: 1 selector: matchLabels: app: mysql template: metadata: labels: app: mysql spec: containers: - name: mysql image: mysql:5.5 ports: - containerPort: 3306 env: - name: MYSQL_ROOT_PASSWORD value: root ================================================ FILE: manifests/wordpress/wordpress/wp-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: labels: app: wordpress name: wordpress namespace: wordpress spec: ports: - port: 80 type: ClusterIP selector: app: wordpress ================================================ FILE: manifests/wordpress/wordpress/wp.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: wordpress namespace: wordpress 
spec: replicas: 1 selector: matchLabels: app: wordpress template: metadata: labels: app: wordpress spec: containers: - name: wordpress image: wordpress ports: - containerPort: 80 env: - name: WORDPRESS_DB_PASSWORD value: root ================================================ FILE: manifests/wordpress/wordpress-secret.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: wp labels: app: wp spec: containers: - image: wordpress env: - name: WORDPRESS_DB_PASSWORD valueFrom: secretKeyRef: name: mysql key: password - name: WORDPRESS_DB_HOST value: mysql imagePullPolicy: IfNotPresent name: wordpress restartPolicy: Always ================================================ FILE: manifests/wordpress/wordpress.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: wp labels: app: wp spec: containers: - image: wordpress env: - name: WORDPRESS_DB_PASSWORD value: root - name: WORDPRESS_DB_HOST value: 127.0.0.1 imagePullPolicy: IfNotPresent name: wordpress - image: mysql:5.5 env: - name: MYSQL_ROOT_PASSWORD value: root imagePullPolicy: IfNotPresent name: mysql restartPolicy: Always ================================================ FILE: manifests/wordpress/wp-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: labels: app: wp name: wp spec: ports: - port: 80 type: NodePort selector: app: wp ================================================ FILE: manifests/wordpress/wp.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: wordpress --- apiVersion: v1 kind: ResourceQuota metadata: name: counts namespace: wordpress spec: hard: pods: "4" --- apiVersion: v1 kind: Service metadata: labels: app: mysql name: mysql namespace: wordpress spec: ports: - port: 3306 type: ClusterIP selector: app: mysql --- apiVersion: v1 kind: Service metadata: labels: app: wordpress name: wordpress namespace: wordpress spec: ports: - port: 80 
type: ClusterIP selector: app: wordpress --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: wordpress namespace: wordpress spec: rules: - host: wordpress.192.168.99.100.nip.io http: paths: - path: / pathType: Prefix backend: service: name: wordpress port: number: 80 --- apiVersion: apps/v1 kind: Deployment metadata: name: mysql namespace: wordpress spec: replicas: 1 selector: matchLabels: app: mysql template: metadata: name: mysql labels: app: mysql spec: containers: - name: mysql image: mysql:5.5 ports: - containerPort: 3306 env: - name: MYSQL_ROOT_PASSWORD value: root - name: MYSQL_DATABASE value: wordpress --- apiVersion: apps/v1 kind: Deployment metadata: name: wordpress namespace: wordpress spec: replicas: 1 selector: matchLabels: app: wordpress template: metadata: name: wordpress labels: app: wordpress spec: containers: - name: wordpress image: wordpress ports: - containerPort: 80 env: - name: WORDPRESS_DB_USER value: root - name: WORDPRESS_DB_PASSWORD value: root - name: WORDPRESS_DB_HOST value: mysql - name: WORDPRESS_DB_NAME value: wordpress ================================================ FILE: monitoring/grafana-statefulset.yaml ================================================ apiVersion: apps/v1beta1 kind: StatefulSet metadata: name: grafana namespace: monitoring labels: name: grafana spec: replicas: 1 serviceName: grafana template: metadata: labels: name: grafana spec: containers: - image: grafana/grafana:4.5.2 name: grafana imagePullPolicy: IfNotPresent # env: resources: # keep request = limit to keep this container in guaranteed class limits: cpu: 200m memory: 100Mi requests: cpu: 100m memory: 100Mi env: - name: GF_AUTH_BASIC_ENABLED value: "true" - name: GF_AUTH_ANONYMOUS_ENABLED value: "true" - name: GF_AUTH_ANONYMOUS_ORG_ROLE value: Viewer - name: GF_LOG_LEVEL value: warn - name: GF_LOG_MODE value: console - name: GF_METRICS_ENABLED value: "true" - name: GF_SERVER_ROOT_URL value: 
"%(protocol)s://%(domain)s:%(http_port)s/api/v1/proxy/namespaces/monitoring/services/grafana:3000/" readinessProbe: httpGet: path: /api/org port: 3000 # initialDelaySeconds: 30 # timeoutSeconds: 1 volumeMounts: - name: grafana-data mountPath: /var/lib/grafana # volumes: # - name: grafana-data # hostPath: # path: /srv/var/lib/grafana volumeClaimTemplates: - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: grafana-data namespace: monitoring spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi ================================================ FILE: monitoring/grafana-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: name: grafana namespace: monitoring annotations: prometheus.io/scrape: 'true' labels: name: grafana spec: type: NodePort ports: - port: 3000 protocol: TCP name: webui nodePort: 30100 selector: name: grafana ================================================ FILE: monitoring/monitoring-namespace.yaml ================================================ apiVersion: v1 kind: Namespace metadata: name: monitoring ================================================ FILE: monitoring/node-exporter-daemonset.yaml ================================================ --- apiVersion: extensions/v1beta1 kind: DaemonSet metadata: labels: name: node-exporter name: node-exporter namespace: monitoring spec: template: metadata: labels: name: node-exporter spec: containers: - args: - --collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($|/) - --collector.procfs=/host/proc - --collector.sysfs=/host/sys env: [] image: prom/node-exporter:v0.14.0 livenessProbe: httpGet: path: / port: scrape name: node-exporter ports: - containerPort: 9100 name: scrape readinessProbe: httpGet: path: / port: scrape successThreshold: 2 volumeMounts: - mountPath: /host/proc name: procfs readOnly: true - mountPath: /rootfs name: root readOnly: true - mountPath: /host/sys name: sysfs readOnly: true hostNetwork: true hostPID: 
true tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master volumes: - hostPath: path: /proc name: procfs - hostPath: path: / name: root - hostPath: path: /sys name: sysfs ================================================ FILE: monitoring/node-exporter-svc.yaml ================================================ apiVersion: v1 kind: Service metadata: labels: name: node-exporter namespace: monitoring annotations: prometheus.io/scrape: 'true' name: node-exporter namespace: monitoring spec: type: ClusterIP clusterIP: None ports: - name: http-metrics port: 9100 protocol: TCP selector: name: node-exporter ================================================ FILE: monitoring/prometheus-config.yaml ================================================ apiVersion: v1 kind: ConfigMap metadata: name: prometheus-config namespace: monitoring data: prometheus.yml: | global: scrape_interval: 10s scrape_timeout: 10s evaluation_interval: 10s rule_files: - "/etc/prometheus-config/*.rules" # # Adapted from # https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml # # A scrape configuration for running Prometheus on a Kubernetes cluster. # This uses separate scrape configs for cluster components (i.e. API server, node) # and services to allow each to use different authentication configs. # # Kubernetes labels will be added as Prometheus labels on metrics via the # `labelmap` relabeling action. # # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments # for the kubernetes-cadvisor job; you will need to edit or remove this job. # Scrape config for API servers. # # Kubernetes exposes API servers as endpoints to the default/kubernetes # service so this uses `endpoints` role and uses relabelling to only keep # the endpoints associated with the default/kubernetes service using the # default named port `https`. This works for single API server deployments as # well as HA API server deployments. 
scrape_configs: - job_name: 'kubernetes-apiservers' kubernetes_sd_configs: - role: endpoints # Default to scraping over https. If required, just disable this or change to # `http`. scheme: https # This TLS & bearer token file config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside # the cluster. Otherwise, more config options have to be provided within the #