Repository: IBM/kube101
Branch: master
Commit: 6ea0f2772176
Files: 41
Total size: 102.6 KB
Directory structure:
gitextract_lgonfye7/
├── .gitbook.yaml
├── .github/
│ └── workflows/
│ └── ci.yml
├── .markdownlint.json
├── .travis.yml
├── .verify-links.sh
├── LICENSE
├── README.md
├── demo/
│ └── .keep
├── docs/
│ ├── CONTRIBUTING.md
│ ├── Lab0/
│ │ └── README.md
│ ├── Lab1/
│ │ ├── README.md
│ │ └── script/
│ │ └── script.md
│ ├── Lab2/
│ │ └── README.md
│ ├── Lab3/
│ │ └── README.md
│ ├── Lab4/
│ │ ├── README.md
│ │ └── healthcheck.yml
│ ├── LabD/
│ │ └── README.md
│ ├── MAINTAINERS.md
│ ├── README.md
│ ├── SUMMARY.md
│ ├── bx_login.sh
│ ├── deploy.sh
│ ├── deploy_rollup.sh
│ ├── install_bx.sh
│ ├── slides/
│ │ └── workshop.pptx
│ ├── test_yml.sh
│ └── workshop.pptx
├── mkdocs.yml
└── presentation/
├── .keep
├── IntroductionToKube.pptx
├── Workshop.pptx
└── scripts/
├── README.md
├── all.sh
├── clean.sh
├── common.sh
├── demoscript
├── lab1-1.sh
├── lab1-2.sh
├── lab1-3.sh
├── lab2-1.sh
└── lab2-2.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitbook.yaml
================================================
# Do not edit this file, to adjust the table of contents, modify SUMMARY.md
root: ./docs/
structure:
readme: README.md
summary: SUMMARY.md
================================================
FILE: .github/workflows/ci.yml
================================================
name: ci
on:
push:
branches:
- main
- master
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- run: pip install mkdocs-material
- run: mkdocs gh-deploy --force
================================================
FILE: .markdownlint.json
================================================
{
"line-length": false,
"MD014": false,
"MD033": false,
"MD026": false
}
================================================
FILE: .travis.yml
================================================
---
language: node_js
node_js: 10
before_script:
- npm install markdownlint-cli
script:
- markdownlint -c .markdownlint.json docs --ignore docs/SUMMARY.md
# - ./.verify-links.sh -v docs
================================================
FILE: .verify-links.sh
================================================
#!/bin/bash
# Copyright 2017 The Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will scan all md (markdown) files for bad references.
# It will look for strings of the form [...](...) and make sure that
# the (...) points to either a valid file in the source tree or, in the
# case of it being an http url, it'll make sure we don't get a 404.
#
# Usage: verify-links.sh [ dir | file ... ]
# default arg is root of our source tree
# Fail fast: abort on any error, on use of an unset variable, and when
# any stage of a pipeline fails.
set -o errexit
set -o nounset
set -o pipefail

verbose=""              # -v: print each file name as it is scanned
debugFlag=""            # -d: print each href as it is checked (also sets verbose)
maxRetries="1"          # -t raises this to 5 for retrying flaky http(s) URLs
stop=""                 # set by "--" to stop option parsing
tmp=/tmp/out${RANDOM}   # prefix for all temp files; removed by clean()
# clean() is defined later in the file; trap resolves the name at exit time.
trap clean EXIT
seenFiles=( ":" ) # just to prevent "undefined" errors
# findPrevious will search for a file to see if we've seen it before.
# If we have then return the matching "anchorFile". If we haven't
# seen it then add it to "seenFiles" and create a new "anchorFile".
# $1 == search file
# Note we can't use a map because bash on a mac doesn't support it.
foundAnchor=""
function findPreviousFile() {
for f in "${seenFiles[@]}" ; do
orig=${f%%:*}
if [[ "${orig}" == "$1" ]]; then
foundAnchor=${f#*:}
return 0
fi
done
# Didn't it so create a new anchorFile and save it for next time
foundAnchor="${tmp}-anchors-${RANDOM}-${RANDOM}"
seenFiles+=("$1:${foundAnchor}")
return 1
}
# Print a diagnostic line to stderr, but only when -d was given.
# Uses printf '%s\n' "$*" instead of unquoted 'echo $*' so that hrefs
# containing glob characters (e.g. '*') are printed literally rather
# than being expanded against files in the current directory.
function debug {
  if [[ "$debugFlag" != "" ]]; then
    printf '%s\n' "$*" >&2
  fi
}
# Remove every temp file created under the ${tmp} prefix.
# Invoked both via the EXIT trap and explicitly before the scan starts.
function clean {
  rm -f "${tmp}"*
}
# Parse leading "-" options. Flags may be bundled (e.g. -vd); "--" stops
# option parsing so remaining args are treated as dirs/files.
while [[ "$#" != "0" && "$1" == "-"* ]]; do
  opts="${1:1}"
  while [[ "$opts" != "" ]]; do
    case "${opts:0:1}" in
      v) verbose="1" ;;
      d) debugFlag="1" ; verbose="1" ;;
      t) maxRetries="5" ;;
      -) stop="1" ;;
      # '\?' matches only a literal '?'. An unescaped '?' is a glob that
      # matches ANY single character, which made the '*' arm below dead
      # code and caused unknown options to show help and exit 0.
      \?) echo "Usage: $0 [OPTION]... [DIR|FILE]..."
          echo "Verify all links in markdown files."
          echo
          echo " -v show each file as it is checked"
          echo " -d show each href as it is found"
          echo " -t retry GETs to http(s) URLs 5 times"
          echo " -? show this help text"
          echo " -- treat remainder of args as dir/files"
          exit 0 ;;
      *) echo "Unknown option '${opts:0:1}'"
         exit 1 ;;
    esac
    # Drop the flag character we just handled.
    opts="${opts:1}"
  done
  shift
  if [[ "$stop" == "1" ]]; then
    break
  fi
done
# echo verbose:$verbose
# echo debugFlag:$debugFlag
# echo args:$*

# Default to scanning the whole tree when no dirs/files were given.
arg=""
if [ "$*" == "" ]; then
  arg="."
fi

# Collect all markdown files under the requested (or default) locations.
# $* and $arg are intentionally unquoted so each dir/file arg becomes a
# separate word for find. vendor/glide paths are excluded.
mdFiles=$(find $* $arg -name "*.md" | sort | grep -v vendor | grep -v glide)

# Start from a clean slate of temp files.
clean
# Main loop: for each markdown file, extract every [..](..) href and
# verify it — http(s) URLs via curl, local paths via -e tests, and
# "#section" anchors against a per-file list of generated anchors.
# Failures are appended to ${tmp}3 via 'tee -a' (a file, not a variable,
# because the 'while read' loops below run in pipeline subshells).
for file in ${mdFiles}; do
  # echo scanning $file
  dir=$(dirname $file)
  [[ -n "$verbose" ]] && echo "> $file"

  # Replace ) with )\n so that each possible href is on its own line.
  # Then only grab lines that have [..](..) in them - put results in tmp file.
  # If the file doesn't have any lines with [..](..) then skip this file
  # Steps:
  # tr   - convert all \n to a space since newlines shouldn't change anything
  # sed  - add a \n after each ) since ) ends what we're looking for.
  #        This makes it so that each href is on a line by itself
  # sed  - prefix each line with a space so the grep can do [^\\]
  # grep - find all lines that match [...](...)
  cat $file | \
    tr '\n' ' ' | \
    sed "s/)/)\n/g" | \
    sed "s/^/ /g" | \
    grep "[^\\]\[.*\](.*)" > ${tmp}1 || continue

  # This sed will extract the href portion of the [..](..) - meaning
  # the stuff in the parens.
  sed "s/.*\[*\]\([^()]*\)/\1/" < ${tmp}1 > ${tmp}2 || continue

  # NOTE: this 'while read' runs in a subshell; anything it needs to
  # report back is written to ${tmp}3, never to a shell variable.
  cat ${tmp}2 | while read line ; do
    # Strip off the leading and trailing parens, and then spaces
    ref=${line#*(}
    ref=${ref%)*}
    ref=$(echo $ref | sed "s/ *//" | sed "s/ *$//")

    # Show all hrefs - mainly for verifying in our tests
    debug "Checking: '$ref'"

    # An external href (ie. starts with http)
    if [ "${ref:0:4}" == "http" ]; then
      # Retry up to ${maxRetries} times (1 by default, 5 with -t),
      # sleeping 1s between attempts. curl -f makes HTTP errors (404s)
      # return non-zero; -k skips cert verification.
      try=0
      while true ; do
        if curl -f -s -k --connect-timeout 10 ${ref} > /dev/null 2>&1 ; then
          break
        fi
        let try=try+1
        if [ ${try} -eq ${maxRetries} ]; then
          extra=""
          if [ ${try} -gt 1 ]; then
            extra="(tried ${try} times) "
          fi
          echo $file: Can\'t load url: ${ref} ${extra} | tee -a ${tmp}3
          break
        fi
        sleep 1
      done
      continue
    fi

    # Skip "mailto:" refs
    if [ "${ref:0:7}" == "mailto:" ]; then
      continue
    fi

    # Local file link (i.e. ref contains a #)
    # "${ref/#}" deletes the first '#'; if that changed anything, the
    # ref has a section anchor in it.
    if [[ "${ref/\#}" != "${ref}" ]]; then
      # If ref doesn't start with "#" then update filepath
      if [ "${ref:0:1}" != "#" ]; then
        # Split ref into filepath and the section link
        reffile=$(echo ${ref} | awk -F"#" '{print $1}')
        fullpath=${dir}/${reffile}
        ref=$(echo ${ref} | awk -F"#" '{$1=""; print $0}')
      else
        # A bare "#section" anchor points back into the current file.
        fullpath=${file}
        ref=${ref:1}
      fi

      if [[ ! -e "${fullpath}" ]]; then
        echo "$file: Can't find referenced file '${fullpath}'" | \
          tee -a ${tmp}3
        continue
      fi

      # Remove leading and trailing spaces
      ref=$(echo ${ref} | sed 's/^[[:space:]]*//' | sed 's/[[:space:]]*$//')

      # If we've seen this file before then grab its processed tmp file
      if findPreviousFile "${fullpath}" ; then
        anchorFile="${foundAnchor}"
      else
        # First time seeing this file: findPreviousFile allocated a new
        # anchorFile name; fill it with the file's anchors now.
        anchorFile="${foundAnchor}"

        # Search file for sections
        used=""  # anchors used, seen+twiddled ones

        # Find all section headers in the file.
        # Remove leading & trailing spaces.
        # Lower case it.
        # Convert spaces to "-".
        # Drop all non alphanumeric chars.
        # Twiddle section anchor if we've seen it before.
        # (This mirrors how markdown renderers generate header anchors.)
        grep "^[[:space:]]*#" < ${fullpath} | \
          sed 's/[[:space:]]*##*[[:space:]]*//' | \
          sed 's/[[:space:]]*$//' | \
          tr '[:upper:]' '[:lower:]' | \
          sed "s/ */-/g" | \
          sed "s/[^-a-zA-Z0-9]//g" | while read section ; do
            # If we haven't used this exact anchor before just use it now
            if [[ "${used}" != *" ${section} "* ]]; then
              anchor=${section}
            else
              # We've used this anchor before so add "-#" to the end.
              # Keep adding 1 to "#" until we find a free spot.
              let num=1
              while true; do
                anchor="${section}-${num}"
                if [[ "${used}" != *" ${anchor} "* ]]; then
                  break
                fi
                let num+=1
              done
            fi
            echo "${anchor}"
            used="${used} ${anchor} "
            debug "Mapped section '${section}' to '${anchor}'"
          done > ${anchorFile} || true

        # Add sections of the form <a name="xxx">
        grep "<a name=" <${fullpath} | \
          sed 's/<a name="/\n<a name="/g' | \
          sed 's/^.*<a name="\(.*\)">.*$/\1/' | \
          sort | uniq >> ${anchorFile} || true
        # echo sections ; cat ${tmp}sections1
      fi

      # Skip refs of the form #L<num>; assume it's pointing to a line
      # number of a file and those don't have anchors
      if [[ "${ref}" =~ ^L([0-9])+$ ]]; then
        continue
      fi

      # Finally, look for the ref in the list of sections/anchors
      debug "Anchor file(${fullpath}): ${anchorFile}"
      if ! grep "^${ref}$" ${anchorFile} > /dev/null 2>&1 ; then
        echo $file: Can\'t find section \'\#${ref}\' in ${fullpath} | \
          tee -a ${tmp}3
      fi
      continue
    fi

    # Plain relative file link - resolve it against this file's dir.
    newPath=${dir}/${ref}

    # And finally make sure the file is there
    # debug line: echo ref: $ref "->" $newPath
    if [[ ! -e "${newPath}" ]]; then
      echo $file: Can\'t find: ${newPath} | tee -a ${tmp}3
    fi
  done
done

# ${tmp}3 is non-empty iff at least one bad link was reported above.
if [ -s ${tmp}3 ]; then exit 1 ; fi
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2018 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# Introduction to Kubernetes
[](https://travis-ci.org/IBM/kube101)
This repository contains introductory material for Kubernetes.
There is a presenter-run [meetup], including [automated scripts] for running a demonstration of Kubernetes as provided by IBM Cloud Kubernetes Service. This should take from 45 minutes to an hour and a half based on the style of the presenter as well as audience participation.
There is a self-guided or host-guided [workshop], with detailed explanations about the principles of operating an application in a Kubernetes environment. Including some time for setup, this should take between 2-4 hours.
[meetup]: ./presentation/IntroductionToKube.pptx
[automated scripts]: ./presentation/scripts
[workshop]: https://ibm.github.io/kube101/
================================================
FILE: demo/.keep
================================================
================================================
FILE: docs/CONTRIBUTING.md
================================================
# Contributing In General
Our project welcomes external contributions! If you have an itch, please feel free to scratch it.
To contribute code or documentation, please submit a pull request to the [GitHub repository](https://github.com/IBM/kube101).
A good way to familiarize yourself with the codebase and contribution process is to look for and tackle low-hanging fruit in the [issue tracker](https://github.com/IBM/kube101/issues). Before embarking on a more ambitious contribution, please quickly get in touch with us via an issue.
**We appreciate your effort, and want to avoid a situation where a contribution requires extensive rework (by you or by us), sits in the queue for a long time, or cannot be accepted at all!**
## Proposing new features
If you would like to implement a new feature, please [raise an issue](https://github.com/IBM/kube101/issues) before sending a pull request so the feature can be discussed.
This is to avoid you spending your valuable time working on a feature that the project developers are not willing to accept into the code base.
## Fixing bugs
If you would like to fix a bug, please [raise an issue](https://github.com/IBM/kube101/issues) before sending a pull request so it can be discussed.
If the fix is trivial or non controversial then this is not usually necessary.
## Merge approval
The project maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance. A change requires LGTMs from two of the maintainers of each
component affected. Note that if your initial push does not pass TravisCI your change will not be approved.
For more details, see the [MAINTAINERS](MAINTAINERS.md) page.
================================================
FILE: docs/Lab0/README.md
================================================
# Lab 0. Access a Kubernetes cluster
## Set up your kubernetes environment
For the hands-on labs in this tutorial repository, you will need a kubernetes cluster. One option for creating a cluster is to make use of the Kubernetes as-a-service from the IBM Cloud Kubernetes Service as outlined below.
### Use the IBM Cloud Kubernetes Service
You will need either a paid IBM Cloud account or an IBM Cloud account which is a Trial account (not a Lite account). If you have one of these accounts, use the [Getting Started Guide](https://cloud.ibm.com/docs/containers?topic=containers-getting-started) to create your cluster.
### Use a hosted trial environment
There are a few services that are accessible over the Internet for temporary use. As these are free services, they can sometimes experience periods of limited availability/quality. On the other hand, they can be a quick way to get started!
* [Play with Kubernetes](https://labs.play-with-k8s.com/) After signing in with your github or docker hub id, click on **Start**, then **Add New Instance** and follow steps shown in terminal to spin up the cluster and add workers.
### Set up on your own workstation
If you would like to configure kubernetes to run on your local workstation for non-production, learning use, there are several options.
* [Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) This solution requires the installation of a supported VM provider (KVM, VirtualBox, HyperKit, Hyper-V - depending on platform)
* [Kubernetes in Docker (kind)](https://kind.sigs.k8s.io/) Runs a kubernetes cluster on Docker containers
* [Docker Desktop (Mac)](https://docs.docker.com/docker-for-mac/kubernetes/) [Docker Desktop (Windows)](https://docs.docker.com/docker-for-windows/kubernetes/) Docker Desktop includes a kubernetes environment
* [Microk8s](https://microk8s.io/docs/) Installable kubernetes packaged as an Ubuntu `snap` image.
## Install the IBM Cloud command-line interface
1. As a prerequisite for the IBM Cloud Kubernetes Service plug-in, install the [IBM Cloud command-line interface](https://clis.ng.bluemix.net/ui/home.html). Once installed, you can access IBM Cloud from your command-line with the prefix `bx`.
2. Log in to the IBM Cloud CLI: `ibmcloud login`.
3. Enter your IBM Cloud credentials when prompted.
**Note:** If you have a federated ID, use `ibmcloud login --sso` to log in to the IBM Cloud CLI. Enter your user name, and use the provided URL in your CLI output to retrieve your one-time passcode. You know you have a federated ID when the login fails without the `--sso` and succeeds with the `--sso` option.
## Install the IBM Cloud Kubernetes Service plug-in
1. To create Kubernetes clusters and manage worker nodes, install the IBM Cloud Kubernetes Service plug-in:
```bash
ibmcloud plugin install container-service -r Bluemix
```
**Note:** The prefix for running commands by using the IBM Cloud Kubernetes Service plug-in is `bx cs`.
2. To verify that the plug-in is installed properly, run the following command:
```bash
ibmcloud plugin list
```
The IBM Cloud Kubernetes Service plug-in is displayed in the results as `container-service`.
## Download the Kubernetes CLI
To view a local version of the Kubernetes dashboard and to deploy apps into your clusters, you will need to install the Kubernetes CLI that corresponds with your operating system:
* [OS X](https://storage.googleapis.com/kubernetes-release/release/v1.10.8/bin/darwin/amd64/kubectl)
* [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.10.8/bin/linux/amd64/kubectl)
* [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.10.8/bin/windows/amd64/kubectl.exe)
**For Windows users:** Install the Kubernetes CLI in the same directory as the IBM Cloud CLI. This setup saves you some filepath changes when you run commands later.
**For OS X and Linux users:**
1. Move the executable file to the `/usr/local/bin` directory using the command `mv /<path_to_file>/kubectl /usr/local/bin/kubectl` .
1. Make sure that `/usr/local/bin` is listed in your PATH system variable.
```shell
$ echo $PATH
/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
```
1. Convert the binary file to an executable: `chmod +x /usr/local/bin/kubectl`
## Configure Kubectl to point to IBM Cloud Kubernetes Service
1. List the clusters in your account:
```shell
ibmcloud ks clusters
```
1. Set an environment variable that will be used in subsequent commands in this lab.
```shell
export CLUSTER_NAME=<your_cluster_name>
```
1. Configure `kubectl` to point to your cluster
```shell
ibmcloud ks cluster config --cluster $CLUSTER_NAME
```
1. Validate proper configuration
```shell
kubectl get namespace
```
1. You should see output similar to the following. If so, then you're ready to continue.
```shell
NAME STATUS AGE
default Active 125m
ibm-cert-store Active 121m
ibm-system Active 124m
kube-node-lease Active 125m
kube-public Active 125m
kube-system Active 125m
```
## Download the Workshop Source Code
Repo `guestbook` has the application that we'll be deploying.
While we're not going to build it we will use the deployment configuration files from that repo.
Guestbook application has two versions v1 and v2 which we will use to demonstrate some rollout
functionality later. All the configuration files we use are under the directory guestbook/v1.
Repo `kube101` contains the step by step instructions to run the workshop.
```shell
git clone https://github.com/IBM/guestbook.git
git clone https://github.com/IBM/kube101.git
```
================================================
FILE: docs/Lab1/README.md
================================================
# Lab 1. Deploy your first application
Learn how to deploy an application to a Kubernetes cluster hosted within
the IBM Container Service.
## 0. Prerequisites
Make sure you satisfy the prerequisites as outlined in [Lab 0](../Lab0/README.md)
## 1. Deploy the guestbook application
In this part of the lab we will deploy an application called `guestbook`
that has already been built and uploaded to DockerHub under the name
`ibmcom/guestbook:v1`.
1. Start by running `guestbook`:
```shell
kubectl create deployment guestbook --image=ibmcom/guestbook:v1
```
This action will take a bit of time. To check the status of the running application,
you can use `$ kubectl get pods`.
You should see output similar to the following:
```shell
kubectl get pods
```
Eventually, the status should show up as `Running`.
```shell
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
guestbook-59bd679fdc-bxdg7 1/1 Running 0 1m
```
The end result of the run command is not just the pod containing our application containers,
but a Deployment resource that manages the lifecycle of those pods.
1. Once the status reads `Running`, we need to expose that deployment as a
service so we can access it through the IP of the worker nodes.
The `guestbook` application listens on port 3000. Run:
```shell
kubectl expose deployment guestbook --type="NodePort" --port=3000
```
1. To find the port used on that worker node, examine your new service:
```shell
$ kubectl get service guestbook
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
guestbook NodePort 10.10.10.253 <none> 3000:31208/TCP 1m
```
We can see that our `<nodeport>` is `31208`. We can see in the output the port mapping from 3000 inside
the pod exposed to the cluster on port 31208. This port in the 31000 range is automatically chosen,
and could be different for you.
1. `guestbook` is now running on your cluster, and exposed to the internet. We need to find out where it is accessible.
The worker nodes running in the container service get external IP addresses.
Get the workers for your cluster and note one (any one) of the public IPs listed on the `<public-IP>` line. Replace `$CLUSTER_NAME` with your cluster name unless you have this environment variable set.
```shell
$ kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
10.185.199.3 Ready master,worker 63d v1.16.2+283af84 10.185.199.3 169.59.228.215 Red Hat 3.10.0-1127.13.1.el7.x86_64 cri-o://1.16.6-17.rhaos4.3.git4936f44.el7
10.185.199.6 Ready master,worker 63d v1.16.2+283af84 10.185.199.6 169.47.78.51 Red Hat 3.10.0-1127.13.1.el7.x86_64 cri-o://1.16.6-17.rhaos4.3.git4936f44.el7
```
We can see that our `<EXTERNAL-IP>` is `169.59.228.215`.
1. Now that you have both the address and the port, you can now access the application in the web browser
at `<public-IP>:<nodeport>`. In the example case this is `169.59.228.215:31208`.
Congratulations, you've now deployed an application to Kubernetes!
When you're all done, continue to the
[next lab of this course](../Lab2/README.md).
================================================
FILE: docs/Lab1/script/script.md
================================================
# Pod
In Kubernetes, a group of one or more containers is called a pod. Containers in a pod are deployed together, and are started, stopped, and replicated as a group. The simplest pod definition describes the deployment of a single container. For example, an nginx web server pod might be defined as such:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: mynginx
namespace: default
labels:
run: nginx
spec:
containers:
- name: mynginx
image: nginx:latest
ports:
- containerPort: 80
```
# Labels
In Kubernetes, labels are a system to organize objects into groups. Labels are key-value pairs that are attached to each object. Label selectors can be passed along with a request to the apiserver to retrieve a list of objects which match that label selector.
To add a label to a pod, add a labels section under metadata in the pod definition:
```yaml
apiVersion: v1
kind: Pod
metadata:
labels:
run: nginx
...
```
To label a running pod
```bash
kubectl label pod mynginx type=webserver
pod "mynginx" labeled
```
To list pods based on labels
```bash
kubectl get pods -l type=webserver
NAME READY STATUS RESTARTS AGE
mynginx 1/1 Running 0 21m
```
# Deployments
A Deployment provides declarative updates for pods and replicas. You only need to describe the desired state in a Deployment object, and it will change the actual state to the desired state. The Deployment object defines the following details:
The elements of a Replication Controller definition
The strategy for transitioning between deployments
To create a deployment for a nginx webserver, edit the nginx-deploy.yaml file as
```yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
generation: 1
labels:
run: nginx
name: nginx
namespace: default
spec:
replicas: 3
selector:
matchLabels:
run: nginx
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
run: nginx
spec:
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx
ports:
- containerPort: 80
protocol: TCP
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
```
and create the deployment
```bash
kubectl create -f nginx-deploy.yaml
deployment "nginx" created
```
The deployment creates the following objects
```bash
kubectl get all -l run=nginx
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deploy/nginx 3 3 3 3 4m
NAME DESIRED CURRENT READY AGE
rs/nginx-664452237 3 3 3 4m
NAME READY STATUS RESTARTS AGE
po/nginx-664452237-h8dh0 1/1 Running 0 4m
po/nginx-664452237-ncsh1 1/1 Running 0 4m
po/nginx-664452237-vts63 1/1 Running 0 4m
```
# Services
Kubernetes pods, as containers, are ephemeral. Replication Controllers create and destroy pods dynamically, e.g. when scaling up or down or when doing rolling updates. While each pod gets its own IP address, even those IP addresses cannot be relied upon to be stable over time. This leads to a problem: if some set of pods provides functionality to other pods inside the Kubernetes cluster, how do those pods find out and keep track of one another?
A Kubernetes Service is an abstraction which defines a logical set of pods and a policy by which to access them. The set of pods targeted by a Service is usually determined by a label selector. Kubernetes offers a simple Endpoints API that is updated whenever the set of pods in a service changes.
To create a service for our nginx webserver, edit the nginx-service.yaml file
```yaml
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
run: nginx
spec:
selector:
run: nginx
ports:
- protocol: TCP
port: 8000
targetPort: 80
type: ClusterIP
```
Create the service
```bash
kubectl create -f nginx-service.yaml
service "nginx" created
```
```bash
kubectl get service -l run=nginx
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx 10.254.60.24 <none> 8000/TCP 38s
```
Describe the service:
```bash
kubectl describe service nginx
Name: nginx
Namespace: default
Labels: run=nginx
Selector: run=nginx
Type: ClusterIP
IP: 10.254.60.24
Port: <unset> 8000/TCP
Endpoints: 172.30.21.3:80,172.30.4.4:80,172.30.53.4:80
Session Affinity: None
No events.
```
The above service is associated to our previous nginx pods. Pay attention to the service selector run=nginx field. It tells Kubernetes that all pods with the label run=nginx are associated to this service, and should have traffic distributed amongst them. In other words, the service provides an abstraction layer, and it is the input point to reach all of the associated pods.
================================================
FILE: docs/Lab2/README.md
================================================
# Lab 2: Scale and Update Deployments
In this lab, you'll learn how to update the number of instances
a deployment has and how to safely roll out an update of your application
on Kubernetes.
For this lab, you need a running deployment of the `guestbook` application
from the previous lab. If you need to create it, run:
```shell
kubectl create deployment guestbook --image=ibmcom/guestbook:v1
```
## 1. Scale apps with replicas
A *replica* is a copy of a pod that contains a running service. By having
multiple replicas of a pod, you can ensure your deployment has the available
resources to handle increasing load on your application.
1. `kubectl` provides a `scale` subcommand to change the size of an
existing deployment. Let's increase our capacity from a single running instance of
`guestbook` up to 10 instances:
```shell
kubectl scale --replicas=10 deployment guestbook
```
Kubernetes will now try to make reality match the desired state of
10 replicas by starting 9 new pods with the same configuration as
the first.
1. To see your changes being rolled out, you can run:
```shell
kubectl rollout status deployment guestbook
```
The rollout might occur so quickly that the following messages might
_not_ display:
```shell
$ kubectl rollout status deployment guestbook
Waiting for rollout to finish: 1 of 10 updated replicas are available...
Waiting for rollout to finish: 2 of 10 updated replicas are available...
Waiting for rollout to finish: 3 of 10 updated replicas are available...
Waiting for rollout to finish: 4 of 10 updated replicas are available...
Waiting for rollout to finish: 5 of 10 updated replicas are available...
Waiting for rollout to finish: 6 of 10 updated replicas are available...
Waiting for rollout to finish: 7 of 10 updated replicas are available...
Waiting for rollout to finish: 8 of 10 updated replicas are available...
Waiting for rollout to finish: 9 of 10 updated replicas are available...
deployment "guestbook" successfully rolled out
```
1. Once the rollout has finished, ensure your pods are running by using:
```shell
kubectl get pods
```
You should see output listing 10 replicas of your deployment:
```shell
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
guestbook-562211614-1tqm7 1/1 Running 0 1d
guestbook-562211614-1zqn4 1/1 Running 0 2m
guestbook-562211614-5htdz 1/1 Running 0 2m
guestbook-562211614-6h04h 1/1 Running 0 2m
guestbook-562211614-ds9hb 1/1 Running 0 2m
guestbook-562211614-nb5qp 1/1 Running 0 2m
guestbook-562211614-vtfp2 1/1 Running 0 2m
guestbook-562211614-vz5qw 1/1 Running 0 2m
guestbook-562211614-zksw3 1/1 Running 0 2m
guestbook-562211614-zsp0j 1/1 Running 0 2m
```
**Tip:** Another way to improve availability is to
[add clusters and regions](https://cloud.ibm.com/docs/containers?topic=containers-ha_clusters#ha_clusters)
to your deployment, as shown in the following diagram:

## 2. Update and roll back apps
Kubernetes allows you to do rolling upgrade of your application to a new
container image. This allows you to easily update the running image and also allows you to
easily undo a rollout if a problem is discovered during or after deployment.
In the previous lab, we used an image with a `v1` tag. For our upgrade
we'll use the image with the `v2` tag.
To update and roll back:
1. Using `kubectl`, you can now update your deployment to use the
`v2` image. `kubectl` allows you to change details about existing
resources with the `set` subcommand. We can use it to change the
image being used.
```shell
kubectl set image deployment/guestbook guestbook=ibmcom/guestbook:v2
```
Note that a pod could have multiple containers, each with its own name.
Each image can be changed individually or all at once by referring to the name.
In the case of our `guestbook` Deployment, the container name is also `guestbook`.
Multiple containers can be updated at the same time.
([More information](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#-em-image-em-).)
1. To check the status of the rollout, run:
```shell
kubectl rollout status deployment/guestbook
```
The rollout might occur so quickly that the following messages
might _not_ display:
```shell
$ kubectl rollout status deployment/guestbook
Waiting for rollout to finish: 2 out of 10 new replicas have been updated...
Waiting for rollout to finish: 3 out of 10 new replicas have been updated...
Waiting for rollout to finish: 3 out of 10 new replicas have been updated...
Waiting for rollout to finish: 3 out of 10 new replicas have been updated...
Waiting for rollout to finish: 4 out of 10 new replicas have been updated...
Waiting for rollout to finish: 4 out of 10 new replicas have been updated...
Waiting for rollout to finish: 4 out of 10 new replicas have been updated...
Waiting for rollout to finish: 4 out of 10 new replicas have been updated...
Waiting for rollout to finish: 4 out of 10 new replicas have been updated...
Waiting for rollout to finish: 5 out of 10 new replicas have been updated...
Waiting for rollout to finish: 5 out of 10 new replicas have been updated...
Waiting for rollout to finish: 5 out of 10 new replicas have been updated...
Waiting for rollout to finish: 6 out of 10 new replicas have been updated...
Waiting for rollout to finish: 6 out of 10 new replicas have been updated...
Waiting for rollout to finish: 6 out of 10 new replicas have been updated...
Waiting for rollout to finish: 7 out of 10 new replicas have been updated...
Waiting for rollout to finish: 7 out of 10 new replicas have been updated...
Waiting for rollout to finish: 7 out of 10 new replicas have been updated...
Waiting for rollout to finish: 7 out of 10 new replicas have been updated...
Waiting for rollout to finish: 8 out of 10 new replicas have been updated...
Waiting for rollout to finish: 8 out of 10 new replicas have been updated...
Waiting for rollout to finish: 8 out of 10 new replicas have been updated...
Waiting for rollout to finish: 8 out of 10 new replicas have been updated...
Waiting for rollout to finish: 9 out of 10 new replicas have been updated...
Waiting for rollout to finish: 9 out of 10 new replicas have been updated...
Waiting for rollout to finish: 9 out of 10 new replicas have been updated...
Waiting for rollout to finish: 1 old replicas are pending termination...
Waiting for rollout to finish: 1 old replicas are pending termination...
Waiting for rollout to finish: 1 old replicas are pending termination...
Waiting for rollout to finish: 9 of 10 updated replicas are available...
Waiting for rollout to finish: 9 of 10 updated replicas are available...
Waiting for rollout to finish: 9 of 10 updated replicas are available...
deployment "guestbook" successfully rolled out
```
1. Test the application as before, by accessing `<public-IP>:<nodeport>`
in the browser to confirm your new code is active.
Remember, to get the "nodeport" and "public-ip" use the following commands. Replace `$CLUSTER_NAME` with the name of your cluster if the environment variable is not set:
```shell
kubectl describe service guestbook
```
and
```shell
kubectl get nodes -o wide
```
To verify that you're running "v2" of guestbook, look at the title of the page,
it should now be `Guestbook - v2`. If you are using a browser, make sure you force refresh (invalidating your cache).
1. If you want to undo your latest rollout, use:
```shell
kubectl rollout undo deployment guestbook
```
You can then use this command to see the status:
```shell
kubectl rollout status deployment/guestbook
```
1. When doing a rollout, you see references to *old* replicas and *new* replicas.
The *old* replicas are the original 10 pods deployed when we scaled the application.
The *new* replicas come from the newly created pods with the different image.
All of these pods are owned by the Deployment.
The deployment manages these two sets of pods with a resource called a ReplicaSet.
We can see the guestbook ReplicaSets with:
```shell
$ kubectl get replicasets -l app=guestbook
NAME DESIRED CURRENT READY AGE
guestbook-5f5548d4f 10 10 10 21m
guestbook-768cc55c78 0 0 0 3h
```
Before we continue, let's delete the application so we can learn about
a different way to achieve the same results:
To remove the deployment, use
```shell
kubectl delete deployment guestbook
```
To remove the service, use:
```shell
kubectl delete service guestbook
```
Congratulations! You deployed the second version of the app. Lab 2
is now complete. Continue to the [next lab of this course](../Lab3/README.md).
================================================
FILE: docs/Lab3/README.md
================================================
# Lab 3: Scale and update apps natively, building multi-tier applications
In this lab you'll learn how to deploy the same guestbook application we
deployed in the previous labs, however, instead of using the `kubectl`
command line helper functions we'll be deploying the application using
configuration files. The configuration file mechanism allows you to have more
fine-grained control over all of the resources being created within the
Kubernetes cluster.
Before we work with the application we need to clone a github repo:
```shell
git clone https://github.com/IBM/guestbook.git
```
This repo contains multiple versions of the guestbook application
as well as the configuration files we'll use to deploy the pieces of the application.
Change directory by running the command
```shell
cd guestbook/v1
```
You will find all the
configurations files for this exercise in this directory.
## 1. Scale apps natively
Kubernetes can deploy an individual pod to run an application but when you
need to scale it to handle a large number of requests a `Deployment` is the
resource you want to use.
A Deployment manages a collection of similar pods. When you ask for a specific number of replicas
the Kubernetes Deployment Controller will attempt to maintain that number of replicas at all times.
Every Kubernetes object we create should provide two nested object fields
that govern the object’s configuration: the object `spec` and the object
`status`. Object `spec` defines the desired state, and object `status`
contains Kubernetes system provided information about the actual state of the
resource. As described before, Kubernetes will attempt to reconcile
your desired state with the actual state of the system.
For each object that we create we need to provide the `apiVersion` we are using
to create the object, the `kind` of object we are creating, and the `metadata`
about the object, such as a `name`, a set of `labels`, and optionally the
`namespace` that this object should belong to.
Consider the following deployment configuration for guestbook application
> **guestbook-deployment.yaml**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-v1
labels:
app: guestbook
version: "1.0"
spec:
replicas: 3
selector:
matchLabels:
app: guestbook
template:
metadata:
labels:
app: guestbook
version: "1.0"
spec:
containers:
- name: guestbook
image: ibmcom/guestbook:v1
ports:
- name: http-server
containerPort: 3000
```
The above configuration file creates a deployment object named 'guestbook-v1'
with a pod containing a single container running the image
`ibmcom/guestbook:v1`. Also the configuration specifies replicas set to 3
and Kubernetes tries to make sure that at least three active pods are running at
all times.
- Create guestbook deployment
To create a Deployment using this configuration file we use the
following command:
```shell
kubectl create -f guestbook-deployment.yaml
```
- List the pod with label app=guestbook
We can then list the pods it created by listing all pods that
have a label of "app" with a value of "guestbook". This matches
the labels defined above in the yaml file in the
`spec.template.metadata.labels` section.
```shell
kubectl get pods -l app=guestbook
```
When you change the number of replicas in the configuration, Kubernetes will
try to add, or remove, pods from the system to match your request. You can
make these modifications by using the following command:
```shell
kubectl edit deployment guestbook-v1
```
This will retrieve the latest configuration for the Deployment from the
Kubernetes server and then load it into an editor for you. You'll notice
that there are a lot more fields in this version than the original yaml
file we used. This is because it contains all of the properties about the
Deployment that Kubernetes knows about, not just the ones we chose to
specify when we create it. Also notice that it now contains the `status`
section mentioned previously.
To exit the `vi` editor, type `:q!`, or if you made changes that you want to see reflected, save them using `:wq`.
You can also edit the deployment file we used to create the Deployment
to make changes. You should use the following command to make the change
effective when you edit the deployment locally.
```shell
kubectl apply -f guestbook-deployment.yaml
```
This will ask Kubernetes to "diff" our yaml file with the current state
of the Deployment and apply just those changes.
We can now define a Service object to expose the deployment to external
clients.
> **guestbook-service.yaml**
```yaml
apiVersion: v1
kind: Service
metadata:
name: guestbook
labels:
app: guestbook
spec:
ports:
- port: 3000
targetPort: http-server
selector:
app: guestbook
type: LoadBalancer
```
The above configuration creates a Service resource named guestbook. A Service
can be used to create a network path for incoming traffic to your running
application. In this case, we are setting up a route from port 3000 on the
cluster to the "http-server" port on our app, which is port 3000 per the
Deployment container spec.
- Let us now create the guestbook service using the same type of command
we used when we created the Deployment:
```shell
kubectl create -f guestbook-service.yaml
```
- Test guestbook app using a browser of your choice using the url
`<your-cluster-ip>:<node-port>`
Remember, to get the `nodeport` and `public-ip` use the following commands, replacing `$CLUSTER_NAME` with the name of your cluster if the environment variable is not already set.
```shell
kubectl describe service guestbook
```
and
```shell
kubectl get nodes -o wide
```
## 2. Connect to a back-end service
If you look at the guestbook source code, under the `guestbook/v1/guestbook`
directory, you'll notice that it is written to support a variety of data
stores. By default it will keep the log of guestbook entries in memory.
That's ok for testing purposes, but as you get into a more "real" environment
where you scale your application that model will not work because
based on which instance of the application the user is routed to they'll see
very different results.
To solve this we need to have all instances of our app share the same data
store - in this case we're going to use a redis database that we deploy to our
cluster. This instance of redis will be defined in a similar manner to the guestbook.
> **redis-master-deployment.yaml**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-master
labels:
app: redis
role: master
spec:
replicas: 1
selector:
matchLabels:
app: redis
role: master
template:
metadata:
labels:
app: redis
role: master
spec:
containers:
- name: redis-master
image: redis:3.2.9
ports:
- name: redis-server
containerPort: 6379
```
This yaml creates a redis database in a Deployment named 'redis-master'.
It will create a single instance, with replicas set to 1, and the guestbook app instances
will connect to it to persist data, as well as read the persisted data back.
The image running in the container is 'redis:3.2.9' and exposes the standard redis port 6379.
- Create a redis Deployment, like we did for guestbook:
```shell
kubectl create -f redis-master-deployment.yaml
```
- Check to see that redis server pod is running:
```shell
$ kubectl get pods -lapp=redis,role=master
NAME READY STATUS RESTARTS AGE
redis-master-q9zg7 1/1 Running 0 2d
```
- Let us test the redis standalone. Replace the pod name `redis-master-q9zg7` with the name of your pod.
```shell
kubectl exec -it redis-master-q9zg7 -- redis-cli
```
The kubectl exec command will start a secondary process in the specified
container. In this case we're asking for the "redis-cli" command to be
executed in the container named "redis-master-q9zg7". When this process
ends the "kubectl exec" command will also exit but the other processes in
the container will not be impacted.
Once in the container we can use the "redis-cli" command to make sure the
redis database is running properly, or to configure it if needed.
```shell
redis-cli> ping
PONG
redis-cli> exit
```
Now we need to expose the `redis-master` Deployment as a Service so that the
guestbook application can connect to it through DNS lookup.
> **redis-master-service.yaml**
```yaml
apiVersion: v1
kind: Service
metadata:
name: redis-master
labels:
app: redis
role: master
spec:
ports:
- port: 6379
targetPort: redis-server
selector:
app: redis
role: master
```
This creates a Service object named 'redis-master' and configures it to target
port 6379 on the pods selected by the selectors "app=redis" and "role=master".
- Create the service to access redis master:
```shell
kubectl create -f redis-master-service.yaml
```
- Restart guestbook so that it will find the redis service to use database:
```shell
kubectl delete deploy guestbook-v1
kubectl create -f guestbook-deployment.yaml
```
- Test guestbook app using a browser of your choice using the url `<your-cluster-ip>:<node-port>`, or by refreshing the page if you already have the app open in another window.
You can see now that if you open up multiple browsers and refresh the page
to access the different copies of guestbook that they all have a consistent state.
All instances write to the same backing persistent storage, and all instances
read from that storage to display the guestbook entries that have been stored.
We have our simple 3-tier application running but we need to scale the
application if traffic increases. Our main bottleneck is that we only have
one database server to process each request coming through guestbook. One
simple solution is to separate the reads and writes such that they go to
different databases that are replicated properly to achieve data consistency.

Create a deployment named 'redis-slave' that can talk to the redis database to
manage data reads. In order to scale the database reads we use the pattern of a
redis slave deployment, which can run several read-only instances. The redis
slave deployment is configured to run two replicas.

> **redis-slave-deployment.yaml**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-slave
labels:
app: redis
role: slave
spec:
replicas: 2
selector:
matchLabels:
app: redis
role: slave
template:
metadata:
labels:
app: redis
role: slave
spec:
containers:
- name: redis-slave
image: ibmcom/guestbook-redis-slave:v2
ports:
- name: redis-server
containerPort: 6379
```
- Create the pod running redis slave deployment.
```shell
kubectl create -f redis-slave-deployment.yaml
```
- Check if all the slave replicas are running
```shell
$ kubectl get pods -lapp=redis,role=slave
NAME READY STATUS RESTARTS AGE
redis-slave-kd7vx 1/1 Running 0 2d
redis-slave-wwcxw 1/1 Running 0 2d
```
- And then go into one of those pods and look at the database to see
that everything looks right. Replace the pod name `redis-slave-kd7vx` with your own pod name. If you get back `(empty list or set)` when you print the keys, go to the guestbook application and add an entry!
```shell
$ kubectl exec -it redis-slave-kd7vx -- redis-cli
127.0.0.1:6379> keys *
1) "guestbook"
127.0.0.1:6379> lrange guestbook 0 10
1) "hello world"
2) "welcome to the Kube workshop"
127.0.0.1:6379> exit
```
Deploy redis slave service so we can access it by DNS name. Once redeployed,
the application will send "read" operations to the `redis-slave` pods while
"write" operations will go to the `redis-master` pods.
> **redis-slave-service.yaml**
```yaml
apiVersion: v1
kind: Service
metadata:
name: redis-slave
labels:
app: redis
role: slave
spec:
ports:
- port: 6379
targetPort: redis-server
selector:
app: redis
role: slave
```
- Create the service to access redis slaves.
```shell
kubectl create -f redis-slave-service.yaml
```
- Restart guestbook so that it will find the slave service to read from.
```shell
kubectl delete deploy guestbook-v1
kubectl create -f guestbook-deployment.yaml
```
- Test guestbook app using a browser of your choice using the url `<your-cluster-ip>:<node-port>`, or by refreshing the page if you have the app open in another window.
That's the end of the lab. Now let's clean-up our environment:
```shell
kubectl delete -f guestbook-deployment.yaml
kubectl delete -f guestbook-service.yaml
kubectl delete -f redis-slave-service.yaml
kubectl delete -f redis-slave-deployment.yaml
kubectl delete -f redis-master-service.yaml
kubectl delete -f redis-master-deployment.yaml
```
================================================
FILE: docs/Lab4/README.md
================================================
# ***UNDER CONSTRUCTION***
## 1. Check the health of apps
Kubernetes uses availability checks (liveness probes) to know when to restart a container. For example, liveness probes could catch a deadlock, where an application is running, but unable to make progress. Restarting a container in such a state can help to make the application more available despite bugs.
Also, Kubernetes uses readiness checks to know when a container is ready to start accepting traffic. A pod is considered ready when all of its containers are ready. One use of this check is to control which pods are used as backends for services. When a pod is not ready, it is removed from load balancers.
In this example, we have defined a HTTP liveness probe to check health of the container every five seconds. For the first 10-15 seconds the `/healthz` returns a `200` response and will fail afterward. Kubernetes will automatically restart the service.
1. Open the `healthcheck.yml` file with a text editor. This configuration script combines a few steps from the previous lesson to create a deployment and a service at the same time. App developers can use these scripts when updates are made or to troubleshoot issues by re-creating the pods:
1. Update the details for the image in your private registry namespace:
```yaml
image: "ibmcom/guestbook:v2"
```
2. Note the HTTP liveness probe that checks the health of the container every five seconds.
```yaml
livenessProbe:
httpGet:
path: /healthz
port: 3000
initialDelaySeconds: 5
periodSeconds: 5
```
3. In the **Service** section, note the `NodePort`. Rather than generating a random NodePort like you did in the previous lesson, you can specify a port in the 30000 - 32767 range. This example uses 30072.
2. Run the configuration script in the cluster. When the deployment and the service are created, the app is available for anyone to see:
```bash
kubectl apply -f healthcheck.yml
```
Now that all the deployment work is done, check how everything turned out. You might notice that because more instances are running, things might run a bit slower.
3. Open a browser and check out the app. To form the URL, combine the IP with the NodePort that was specified in the configuration script. To get the public IP address for the worker node:
```bash
ibmcloud cs workers <cluster-name>
```
In a browser, you'll see a success message. If you do not see this text, don't worry. This app is designed to go up and down.
For the first 10 - 15 seconds, a 200 message is returned, so you know that the app is running successfully. After those 15 seconds, a timeout message is displayed, as is designed in the app.
4. Launch your Kubernetes dashboard:
1. Get your credentials for Kubernetes.
```bash
kubectl config view -o jsonpath='{.users[0].user.auth-provider.config.id-token}'
```
2. Copy the **id-token** value that is shown in the output.
3. Set the proxy with the default port number.
```bash
kubectl proxy
```
Output:
```bash
Starting to serve on 127.0.0.1:8001
```
4. Sign in to the dashboard.
1. Open the following URL in a web browser.
```bash
http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
```
2. In the sign-on page, select the **Token** authentication method.
3. Then, paste the **id-token** value that you previously copied into the **Token** field and click **SIGN IN**.
In the **Workloads** tab, you can see the resources that you created. From this tab, you can continually refresh and see that the health check is working. In the **Pods** section, you can see how many times the pods are restarted when the containers in them are re-created. You might happen to catch errors in the dashboard, indicating that the health check caught a problem. Give it a few minutes and refresh again. You see the number of restarts changes for each pod.
5. Ready to delete what you created before you continue? This time, you can use the same configuration script to delete both of the resources you created.
```kubectl delete -f healthcheck.yml```
6. When you are done exploring the Kubernetes dashboard, in your CLI, enter `CTRL+C` to exit the `proxy` command.
================================================
FILE: docs/Lab4/healthcheck.yml
================================================
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: hw-demo-deployment
spec:
replicas: 3
template:
metadata:
name: pod-liveness-http
labels:
run: hw-demo-health
test: guestbook-demo
spec:
containers:
- name: hw-demo-container
image: "ibmcom/guestbook:v2"
imagePullPolicy: Always
livenessProbe:
httpGet:
path: /healthz
port: 3000
initialDelaySeconds: 5
periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
name: hw-demo-service
labels:
run: hw-demo-health
spec:
type: NodePort
selector:
run: hw-demo-health
ports:
- protocol: TCP
port: 3000
nodePort: 30072
================================================
FILE: docs/LabD/README.md
================================================
# Optional Debugging Lab - Tips and Tricks for Debugging Applications in Kubernetes
Advanced debugging techniques to reach your pods.
## Pod Logs
You can look at the logs of any of the pods running under your deployments as follows
```shell
kubectl logs <podname>
```
Remember that if you have multiple containers running in your pod, you
have to specify the specific container you want to see logs from.
```shell
kubectl logs <pod-name> <container-name>
```
This subcommand operates like `tail`. Including the `-f` flag will
continue to stream the logs live once the current time is reached.
## kubectl edit and vi
By default, on many Linux and macOS systems, you will be dropped into the editor `vi`.
```shell
export EDITOR=nano
```
On Windows, a copy of `notepad.exe` will be opened with the contents of the file.
## busybox pod
For debugging live, this command frequently helps me:
```shell
kubectl run bb --image=busybox --restart=Never -it --rm
```
In the busybox image is a basic shell that contains useful utilities.
Utils I often use are `nslookup` and `wget`.
`nslookup` is useful for testing DNS resolution in a pod.
`wget` is useful for trying to do network requests.
## Service Endpoints
Endpoint resource can be used to see all the service endpoints.
```shell
kubectl get endpoints <service>
```
## ImagePullPolicy
By default Kubernetes will only pull the image on first use. This can
be confusing during development when you expect changes to show up.
You should be aware of the three `ImagePullPolicy`s:
- IfNotPresent - the default, only request the image if not present.
- Always - always request the image.
- Never
More details on image management may be [found here](https://kubernetes.io/docs/concepts/containers/images/).
================================================
FILE: docs/MAINTAINERS.md
================================================
# Maintainers Guide
This guide is intended for maintainers - anybody with commit access to one or more Developer Technology repositories.
## Maintainers
| Name | GitHub | email |
|---|---|---|
| Nathan Fritze | nfritze | nfritz@us.ibm.com |
| Nathan LeViere | nathanleviere | njlevier@gmail.com |
## Methodology
A master branch. This branch MUST be releasable at all times. Commits and merges against this branch MUST contain only bugfixes and/or security fixes. Maintenance releases are tagged against master.
A develop branch. This branch contains your proposed changes
The remainder of this document details how to merge pull requests to the repositories.
## Merge approval
The project maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance. A change requires LGTMs from one of the maintainers of each
component affected.
## Reviewing Pull Requests
We recommend reviewing pull requests directly within GitHub. This allows a public commentary on changes, providing transparency for all users. When providing feedback be civil, courteous, and kind. Disagreement is fine, so long as the discourse is carried out politely. If we see a record of uncivil or abusive comments, we will revoke your commit privileges and invite you to leave the project.
During your review, consider the following points:
## Does the change have impact?
While fixing typos is nice as it adds to the overall quality of the project, merging a typo fix at a time can be a waste of effort. (Merging many typo fixes because somebody reviewed the entire component, however, is useful!) Other examples to be wary of:
Changes in variable names. Ask whether or not the change will make understanding the code easier, or if it could simply be a personal preference on the part of the author.
Essentially: feel free to close issues that do not have impact.
## Do the changes make sense?
If you do not understand what the changes are or what they accomplish, ask the author for clarification. Ask the author to add comments and/or clarify test case names to make the intentions clear.
At times, such clarification will reveal that the author may not be using the code correctly, or is unaware of features that accommodate their needs. If you feel this is the case, work up a code sample that would address the issue for them, and feel free to close the issue once they confirm.
## Is this a new feature? If so
Does the issue contain narrative indicating the need for the feature? If not, ask them to provide that information. Since the issue will be linked in the changelog, this will often be a user's first introduction to it.
Are new unit tests in place that test all new behaviors introduced? If not, do not merge the feature until they are!
Is documentation in place for the new feature? (See the documentation guidelines). If not do not merge the feature until it is!
Is the feature necessary for general use cases? Try and keep the scope of any given component narrow. If a proposed feature does not fit that scope, recommend to the user that they maintain the feature on their own, and close the request. You may also recommend that they see if the feature gains traction amongst other users, and suggest they re-submit when they can show such support.
================================================
FILE: docs/README.md
================================================
# IBM Cloud Kubernetes Service Lab
<img src="https://kubernetes.io/images/favicon.png" width="200">
## An introduction to containers
Hey, are you looking for a containers 101 course? Check out our [Docker Essentials](https://developer.ibm.com/courses/all/docker-essentials-extend-your-apps-with-containers/).
Containers allow you to run securely isolated applications with quotas on system resources. Containers started out as an individual feature delivered with the linux kernel. Docker launched with making containers easy to use and developers quickly latched onto that idea. Containers have also sparked an interest in microservice architecture, a design pattern for developing applications in which complex applications are broken down into smaller, composable pieces which work together.
Watch this [video](https://www.youtube.com/watch?v=wlBhtc31I8c) to learn about production uses of containers.
## Objectives
This lab is an introduction to using Docker containers on Kubernetes in the IBM Cloud Kubernetes Service. By the end of the course, you'll achieve these objectives:
* Understand core concepts of Kubernetes
* Build a Docker image and deploy an application on Kubernetes in the IBM Cloud Kubernetes Service
* Control application deployments, while minimizing your time with infrastructure management
* Add AI services to extend your app
* Secure and monitor your cluster and app
## Prerequisites
* A Pay-As-You-Go or Subscription [IBM Cloud account](https://console.bluemix.net/registration/)
## Virtual machines
Prior to containers, most infrastructure ran not on bare metal, but atop hypervisors managing multiple virtualized operating systems (OSes). This arrangement allowed isolation of applications from one another on a higher level than that provided by the OS. These virtualized operating systems see what looks like their own exclusive hardware. However, this also means that each of these virtual operating systems are replicating an entire OS, taking up disk space.
## Containers
Containers provide isolation similar to VMs, except provided by the OS and at the process level. Each container is a process or group of processes run in isolation. Typical containers explicitly run only a single process, as they have no need for the standard system services. What they usually need to do can be provided by system calls to the base OS kernel.
The isolation on linux is provided by a feature called 'namespaces'. Each different kind of isolation (e.g. user, cgroups) is provided by a different namespace.
This is a list of some of the namespaces that are commonly used and visible to the user:
* PID - process IDs
* USER - user and group IDs
* UTS - hostname and domain name
* NS - mount points
* NET - network devices, stacks, and ports
* CGROUPS - control limits and monitoring of resources
## VM vs container
Traditional applications are run on native hardware. A single application does not typically use the full resources of a single machine. We try to run multiple applications on a single machine to avoid wasting resources. We could run multiple copies of the same application, but to provide isolation we use VMs to run multiple application instances (VMs) on the same hardware. These VMs have full operating system stacks which make them relatively large and inefficient due to duplication both at runtime and on disk.

Containers allow you to share the host OS. This reduces duplication while still providing the isolation. Containers also allow you to drop unneeded files such as system libraries and binaries to save space and reduce your attack surface. If SSHD or LIBC are not installed, they cannot be exploited.
## Get set up
Before we dive into Kubernetes, you need to provision a cluster for your containerized app. Then you won't have to wait for it to be ready for the subsequent labs.
1. You must install the CLIs per <https://console.ng.bluemix.net/docs/containers/cs_cli_install.html>. If you do not yet have these CLIs and the Kubernetes CLI, do [lab 0](Lab0) before starting the course.
2. If you haven't already, provision a cluster. This can take a few minutes, so let it start first: `ibmcloud cs cluster-create --name <name-of-cluster>`
3. After creation, before using the cluster, make sure it has completed provisioning and is ready for use. Run `ibmcloud cs clusters` and make sure that your cluster is in state "deployed".
4. Then use `ibmcloud cs workers <name-of-cluster>` and make sure that all worker nodes are in state "normal" with Status "Ready".
## Kubernetes and containers: an overview
Let's talk about Kubernetes orchestration for containers before we build an application on it. We need to understand the following facts about it:
* What is Kubernetes, exactly?
* How was Kubernetes created?
* Kubernetes architecture
* Kubernetes resource model
* Kubernetes at IBM
* Let's get started
## What is Kubernetes?
Now that we know what containers are, let's define what Kubernetes is. Kubernetes is a container orchestrator to provision, manage, and scale applications. In other words, Kubernetes allows you to manage the lifecycle of containerized applications within a cluster of nodes (which are a collection of worker machines, for example, VMs, physical machines etc.).
Your applications may need many other resources to run such as Volumes, Networks, and Secrets that will help you to do things such as connect to databases, talk to firewalled backends, and secure keys. Kubernetes helps you add these resources into your application. Infrastructure resources needed by applications are managed declaratively.
**Fast fact:** Other orchestration technologies are Mesos and Swarm.
The key paradigm of Kubernetes is its declarative model. The user provides the "desired state" and Kubernetes will do its best to make it happen. If you need 5 instances, you do not start 5 separate instances on your own but rather tell Kubernetes that you need 5 instances and Kubernetes will reconcile the state automatically. Simply at this point you need to know that you declare the state you want and Kubernetes makes that happen. If something goes wrong with one of your instances and it crashes, Kubernetes still knows the desired state and creates a new instance on an available node.
**Fun to know:** Kubernetes goes by many names. Sometimes it is shortened to _k8s_ (losing the internal 8 letters), or _kube_. The word is rooted in ancient Greek and means "Helmsman". A helmsman is the person who steers a ship. We hope you can see the analogy between directing a ship and the decisions made to orchestrate containers on a cluster.
## How was Kubernetes created?
Google wanted to open source their knowledge of creating and running the internal tools Borg & Omega. It adopted Open Governance for Kubernetes by starting the Cloud Native Computing Foundation (CNCF) and giving Kubernetes to that foundation, therefore making it less influenced by Google directly. Many companies such as RedHat, Microsoft, IBM and Amazon quickly joined the foundation.
Main entry point for the kubernetes project is at [http://kubernetes.io](http://kubernetes.io) and the source code can be found at [https://github.com/kubernetes](https://github.com/kubernetes).
## Kubernetes architecture
At its core, Kubernetes is a data store (etcd). The declarative model is stored in the data store as objects, that means when you say I want 5 instances of a container then that request is stored into the data store. This information change is watched and delegated to Controllers to take action. Controllers then react to the model and attempt to take action to achieve the desired state. The power of Kubernetes is in its simplistic model.
As shown, API server is a simple HTTP server handling create/read/update/delete(CRUD) operations on the data store. Then the controller picks up the change you wanted and makes that happen. Controllers are responsible for instantiating the actual resource represented by any Kubernetes resource. These actual resources are what your application needs to allow it to run successfully.

## Kubernetes resource model
Kubernetes Infrastructure defines a resource for every purpose. Each resource is monitored and processed by a controller. When you define your application, it contains a collection of these resources. This collection will then be read by Controllers to build your applications actual backing instances. Some of resources that you may work with are listed below for your reference, for a full list you should go to [https://kubernetes.io/docs/concepts/](https://kubernetes.io/docs/concepts/). In this class we will only use a few of them, like Pod, Deployment, etc.
* Config Maps holds configuration data for pods to consume.
* Daemon Sets ensure that each node in the cluster runs this Pod
* Deployments defines a desired state of a deployment object
* Events provides lifecycle events on Pods and other deployment objects
* Endpoints allow inbound connections to reach the cluster services
* Ingress is a collection of rules that allow inbound connections to reach the cluster services
* Jobs creates one or more pods and as they complete successfully the job is marked as completed.
* Node is a worker machine in Kubernetes
* Namespaces are multiple virtual clusters backed by the same physical cluster
* Pods are the smallest deployable units of computing that can be created and managed in Kubernetes
* Persistent Volumes provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed
* Replica Sets ensures that a specified number of pod replicas are running at any given time
* Secrets are intended to hold sensitive information, such as passwords, OAuth tokens, and ssh keys
* Service Accounts provides an identity for processes that run in a Pod
* Services is an abstraction which defines a logical set of Pods and a policy by which to access them - sometimes called a micro-service.
* Stateful Sets is the workload API object used to manage stateful applications.
* and more...

Kubernetes does not have the concept of an application. It has simple building blocks that you are required to compose. Kubernetes is a cloud native platform where the internal resource model is the same as the end user resource model.
## Key resources
A Pod is the smallest object model that you can create and run. You can add labels to a pod to identify a subset to run operations on. When you are ready to scale your application you can use the label to tell Kubernetes which Pod you need to scale. A Pod typically represent a process in your cluster. Pods contain at least one container that runs the job and additionally may have other containers in it called sidecars for monitoring, logging, etc. Essentially a Pod is a group of containers.
When we talk about an application, we usually refer to a group of Pods. Although an entire application can be run in a single Pod, we usually build multiple Pods that talk to each other to make a useful application. We will see why separating the application logic and backend database into separate Pods will scale better when we build an application shortly.
Services define how to expose your app as a DNS entry to have a stable reference. We use query based selector to choose which pods are supplying that service.
The user directly manipulates resources via yaml:
```bash
kubectl (create|get|apply|delete) -f myResource.yaml
```
Kubernetes provides us with a client interface through ‘kubectl’. Kubectl commands allow you to manage your applications, manage cluster and cluster resources, by modifying the model in the data store.
## Kubernetes application deployment workflow

1. User via "kubectl" deploys a new application. Kubectl sends the request to the API Server.
2. API server receives the request and stores it in the data store (etcd). Once the request is written to data store, the API server is done with the request.
3. Watchers detect the resource changes and send a notification to the controller to act upon it
4. Controller detects the new app and creates new pods to match the desired number of instances. Any changes to the stored model will be picked up to create or delete Pods.
5. Scheduler assigns new pods to a Node based on a criteria. Scheduler makes decisions to run Pods on specific Nodes in the cluster. Scheduler modifies the model with the node information.
6. Kubelet on a node detects a pod with an assignment to itself, and deploys the requested containers via the container runtime (e.g. Docker). Each Node watches the storage to see what pods it is assigned to run. It takes necessary actions on resource assigned to it like create/delete Pods.
7. Kubeproxy manages network traffic for the pods - including service discovery and load-balancing. Kubeproxy is responsible for communication between Pods that want to interact.
## Lab information
IBM Cloud provides the capability to run applications in containers on Kubernetes. The IBM Cloud Kubernetes Service runs Kubernetes clusters which deliver the following:
* Powerful tools
* Intuitive user experience
* Built-in security and isolation to enable rapid delivery of secure applications
* Cloud services including cognitive capabilities from Watson
* Capability to manage dedicated cluster resources for both stateless applications and stateful workloads
## Lab overview
[Lab 0](Lab0) (Optional): Provides a walkthrough for installing IBM Cloud command-line tools and the Kubernetes CLI. You can skip this lab if you have the IBM Cloud CLI, the container-service plugin, the containers-registry plugin, and the kubectl CLI already installed on your machine.
[Lab 1](Lab1): This lab walks through creating and deploying a simple "guestbook" app written in Go as a net/http Server and accessing it.
[Lab 2](Lab2): Builds on lab 1 to expand to a more resilient setup which can survive having containers fail and recover. Lab 2 will also walk through basic services you need to get started with Kubernetes and the IBM Cloud Kubernetes Service
[Lab 3](Lab3): Builds on lab 2 by increasing the capabilities of the deployed Guestbook application. This lab covers basic distributed application design and how kubernetes helps you use standard design practices.
[Lab 4](Lab4): How to enable your application so Kubernetes can automatically monitor and recover your applications with no user intervention.
[Lab D](LabD): Debugging tips and tricks to help you along your Kubernetes journey. This lab is useful reference that does not follow in a specific sequence of the other labs.
================================================
FILE: docs/SUMMARY.md
================================================
# Summary
<!-- Rules of SUMMARY.md are here: https://docs.gitbook.com/integrations/github/content-configuration#summary -->
<!-- All headings MUST be THREE hashmarks (###) -->
<!-- Indented bullets (4 spaces) will make the first line be a section -->
### Getting Started
* [Lab 0: Get the IBM Cloud Container Service](Lab0/README.md)
### Labs
* [Lab 1. Set up and deploy your first application](Lab1/README.md)
* [Lab 2: Scale and Update Deployments](Lab2/README.md)
* [Lab 3: Scale and update apps natively, building multi-tier applications](Lab3/README.md)
### Resources
* [IBM Developer](https://developer.ibm.com)
================================================
FILE: docs/bx_login.sh
================================================
#!/bin/sh
# Log in to IBM Cloud with the bx CLI and initialize the Kubernetes
# Service (cs) and Container Registry (cr) plugins.
# Required environment: BLUEMIX_API_KEY, BLUEMIX_NAMESPACE, CF_TARGET_URL,
# and CF_ORG/CF_SPACE (falling back to BLUEMIX_ORG/BLUEMIX_SPACE).

# Fall back to the BLUEMIX_* names when the CF_* ones are not set.
# (Operands are quoted: the original unquoted [ -z $CF_ORG ] breaks on
# values containing whitespace.)
if [ -z "$CF_ORG" ]; then
  CF_ORG="$BLUEMIX_ORG"
fi
if [ -z "$CF_SPACE" ]; then
  CF_SPACE="$BLUEMIX_SPACE"
fi
if [ -z "$BLUEMIX_API_KEY" ] || [ -z "$BLUEMIX_NAMESPACE" ]; then
  echo "Define all required environment variables and rerun the stage." >&2
  exit 1
fi

echo "Deploy pods"
echo "bx login -a $CF_TARGET_URL"
if ! bx login -a "$CF_TARGET_URL" -o "$CF_ORG" -s "$CF_SPACE" --apikey "$BLUEMIX_API_KEY"; then
  echo "Failed to authenticate to IBM Cloud" >&2
  exit 1
fi

# Init container clusters
echo "bx cs init"
if ! bx cs init; then
  echo "Failed to initialize to IBM Cloud Kubernetes Service" >&2
  exit 1
fi

# Init container registry
echo "bx cr login"
if ! bx cr login; then
  echo "Failed to login to the IBM Cloud Container Registry" >&2
  exit 1
fi
================================================
FILE: docs/deploy.sh
================================================
#!/bin/bash
# Deploy the Stage 3 Watson demo: build/push the watson and watson-talk
# images and create the Kubernetes resources bound to a Watson Tone
# Analyzer instance.
# Requires CLUSTER_NAME and BLUEMIX_NAMESPACE in the environment and an
# already-authenticated bx CLI (see bx_login.sh).
echo "Create Demo Application"

# Public IP of the first healthy worker; empty when the cluster is
# missing or its workers are not ready yet.
IP_ADDR=$(bx cs workers "$CLUSTER_NAME" | grep normal | awk '{ print $2 }')
if [ -z "$IP_ADDR" ]; then
  echo "$CLUSTER_NAME not created or workers not ready"
  exit 1
fi

echo -e "Configuring vars"
if ! exp=$(bx cs cluster-config "$CLUSTER_NAME" | grep export); then
  echo "Cluster $CLUSTER_NAME not created or not ready."
  exit 1
fi
# exp is a "export KUBECONFIG=..." line emitted by the CLI.
eval "$exp"

echo -e "Setting up Stage 3 Watson Deployment yml"
cd Stage3/ || exit 1
# curl --silent "https://raw.githubusercontent.com/IBM/container-service-getting-started-wt/master/Stage3/watson-deployment.yml" > watson-deployment.yml
#
## WILL NEED FOR LOADBALANCER ###
# #Find the line that has the comment about the load balancer and add the nodeport def after this
# let NU=$(awk '/^ # type: LoadBalancer/{ print NR; exit }' guestbook.yml)+3
# NU=$NU\i
# sed -i "$NU\ \ type: NodePort" guestbook.yml #For OSX: brew install gnu-sed; replace sed references with gsed

echo -e "Deleting previous version of Watson Deployment if it exists"
kubectl delete --ignore-not-found=true -f watson-deployment.yml

echo -e "Unbinding previous version of Watson Tone Analyzer if it exists"
if bx service list | grep tone; then
  bx cs cluster-service-unbind "$CLUSTER_NAME" default tone
fi

echo -e "Deleting previous Watson Tone Analyzer instance if it exists"
bx service delete tone -f
echo -e "Creating new instance of Watson Tone Analyzer named tone..."
bx service create tone_analyzer standard tone
echo -e "Binding Watson Tone Service to Cluster and Pod"
bx cs cluster-service-bind "$CLUSTER_NAME" default tone

echo -e "Building Watson and Watson-talk images..."
cd watson/ || exit 1
if ! docker build -t registry.ng.bluemix.net/contbot/watson . &> buildout.txt; then
  echo "Could not create the watson image for the build"
  cat buildout.txt
  exit 1
fi
if ! docker push registry.ng.bluemix.net/contbot/watson; then
  echo "Could not push the watson image for the build"
  exit 1
fi

cd ../watson-talk/ || exit 1
if ! docker build -t registry.ng.bluemix.net/contbot/watson-talk . &> buildout.txt; then
  echo "Could not create the watson-talk image for the build"
  cat buildout.txt
  exit 1
fi
if ! docker push registry.ng.bluemix.net/contbot/watson-talk; then
  # Fixed copy-paste: this failure is for the watson-talk image.
  echo "Could not push the watson-talk image for the build"
  exit 1
fi

echo -e "Injecting image namespace into deployment yamls"
cd .. || exit 1
if ! sed -i "s/<namespace>/${BLUEMIX_NAMESPACE}/" watson-deployment.yml; then
  echo "Could not inject image namespace into deployment yaml"
  exit 1
fi

echo -e "Creating pods"
kubectl create -f watson-deployment.yml

# Extract the NodePort assigned to watson-service for the final URL.
PORT=$(kubectl get services | grep watson-service | sed 's/.*:\([0-9]*\).*/\1/g')
echo ""
echo "View the watson talk service at http://$IP_ADDR:$PORT"
FILE: docs/deploy_rollup.sh
================================================
#!/bin/bash
# Top-level CI driver: install the CLI, authenticate, sanity-check the
# deployment YAML, then deploy the Stage 3 pods. Each step is sourced
# (not executed) so exported variables carry over to later steps.

echo "Install IBM Cloud CLI"
if ! . workshop/install_bx.sh; then
  echo "Failed to install IBM Cloud Kubernetes Service CLI prerequisites"
  exit 1
fi

echo "Login to IBM Cloud"
if ! . workshop/bx_login.sh; then
  echo "Failed to authenticate to IBM Cloud Kubernetes Service"
  exit 1
fi

echo "Testing yml files for generalized namespace"
if ! . workshop/test_yml.sh; then
  echo "Failed to find <namespace> in deployment YAML files"
  exit 1
fi

echo "Deploy pods for Stage 3..."
if ! . workshop/deploy.sh; then
  echo "Failed to Deploy pods for stage 3 to IBM Cloud Kubernetes Service"
  exit 1
fi
================================================
FILE: docs/install_bx.sh
================================================
#!/bin/bash
# Install the IBM Cloud (Bluemix) CLI, its Kubernetes Service and
# Container Registry plugins, and kubectl into /tmp for the CI stages.

echo "Download IBM Cloud CLI"
wget --quiet --output-document=/tmp/Bluemix_CLI_amd64.tar.gz http://public.dhe.ibm.com/cloud/bluemix/cli/bluemix-cli/latest/Bluemix_CLI_amd64.tar.gz
tar -xf /tmp/Bluemix_CLI_amd64.tar.gz --directory=/tmp

# Create bx alias (a tiny wrapper script that forwards all args).
echo "#!/bin/sh" >/tmp/Bluemix_CLI/bin/bx
echo "/tmp/Bluemix_CLI/bin/bluemix \"\$@\" " >>/tmp/Bluemix_CLI/bin/bx
chmod +x /tmp/Bluemix_CLI/bin/*
chmod +x /tmp/Bluemix_CLI/bin/cfcli/*
export PATH="/tmp/Bluemix_CLI/bin:$PATH"

# Install IBM Cloud CS plugin
echo "Install the IBM Cloud Kubernetes Service plugin"
bx plugin install container-service -r Bluemix
bx plugin install container-registry -r Bluemix

echo "Install kubectl"
# Resolve the latest stable kubectl version first, then quote it in the
# URL: the original interpolated an unquoted $(curl ...) directly into
# the wget argument, which would word-split on any unexpected output.
KUBE_VERSION=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
wget --quiet --output-document=/tmp/Bluemix_CLI/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl"
chmod +x /tmp/Bluemix_CLI/bin/kubectl

# Optional diagnostics for debugging CI runs.
if [ -n "$DEBUG" ]; then
  bx --version
  bx plugin list
fi
================================================
FILE: docs/test_yml.sh
================================================
#!/bin/bash
# Verify that every "image:" line in the stage YAML files still carries
# the generic <namespace> placeholder (i.e. nobody committed a
# hard-coded registry namespace). The check works by comparing the set
# of image lines with the set of placeholder lines: they must be equal.

echo "Testing YAML files for <namespace>"
ls -- */*.yml
imageLines=$(grep 'image:' -- */*.yml)
# -F makes this a literal match. The original pattern \<namespace\> is
# a BRE *word-boundary* match on the word "namespace", which would also
# match unrelated lines (e.g. "namespace: default") and misfire.
namespaceLines=$(grep -F '<namespace>' -- */*.yml)
if [ "$imageLines" = "$namespaceLines" ]; then
  echo "<namespace> found as expected in YAML files"
else
  echo "<namespace> NOT FOUND as expected in YAML files"
  exit 1
fi
================================================
FILE: mkdocs.yml
================================================
# Project information
site_name: Kube 101 Workshop
site_url: https://ibm.github.io/kube101
site_author: IBM Developer

# Repository
repo_name: kube101
repo_url: https://github.com/ibm/kube101
edit_uri: edit/master/docs

# Navigation
nav:
  - Welcome:
      - About the workshop: README.md
  - Workshop:
      - Lab 0. Access a Kubernetes cluster: Lab0/README.md
      - Lab 1. Deploy your first application: Lab1/README.md
      - Lab 2. Scale and update deployments: Lab2/README.md
      - Lab 3. Build multi-tier applications: Lab3/README.md

## DO NOT CHANGE BELOW THIS LINE

# Copyright
copyright: Copyright © 2020 IBM Developer

# Theme: Material for MkDocs with IBM Plex fonts and a blue palette.
theme:
  name: material
  font:
    text: IBM Plex Sans
    code: IBM Plex Mono
  icon:
    logo: material/library
  features:
    # - navigation.tabs
    - navigation.instant
    - navigation.expand
  palette:
    scheme: default
    primary: blue
    accent: blue

# Plugins
plugins:
  - search

# Customization: social links rendered in the page footer.
extra:
  social:
    - icon: fontawesome/brands/github
      link: https://github.com/ibm
    - icon: fontawesome/brands/twitter
      link: https://twitter.com/ibmdeveloper
    - icon: fontawesome/brands/linkedin
      link: https://www.linkedin.com/company/ibm/
    - icon: fontawesome/brands/youtube
      link: https://www.youtube.com/user/developerworks
    - icon: fontawesome/brands/dev
      link: https://dev.to/ibmdeveloper

# Extensions
markdown_extensions:
  - abbr
  - admonition
  - attr_list
  - def_list
  - footnotes
  - meta
  - toc:
      permalink: true
  - pymdownx.arithmatex:
      generic: true
  - pymdownx.betterem:
      smart_enable: all
  - pymdownx.caret
  - pymdownx.critic
  - pymdownx.details
  - pymdownx.emoji:
      emoji_index: !!python/name:materialx.emoji.twemoji
      emoji_generator: !!python/name:materialx.emoji.to_svg
  - pymdownx.highlight
  - pymdownx.inlinehilite
  - pymdownx.keys
  - pymdownx.mark
  - pymdownx.smartsymbols
  - pymdownx.snippets:
      check_paths: true
  - pymdownx.superfences:
      custom_fences:
        - name: mermaid
          class: mermaid
          format: !!python/name:pymdownx.superfences.fence_code_format
  - pymdownx.tabbed
  - pymdownx.tasklist:
      custom_checkbox: true
  - pymdownx.tilde
================================================
FILE: presentation/.keep
================================================
================================================
FILE: presentation/scripts/README.md
================================================
# Instructions for Running the Presentation Demo Scripts
## The following are needed:
- `bx` executable
- `bx cs` plugin
- `docker` installed
- `kubectl`
## Prereq steps:
- Pick a name for your cluster (e.g. `osscluster`)
- Pick a name for your Registry namespace (e.g. `kube101`)
- Log into IBM Cloud: `bx login` or if you use sso: `bx login --sso`
- Log into the IBM Registry: `bx cr login`
- Create a Kubernetes cluster: `bx cs cluster-create --name osscluster`, if
it doesn't already exist
- Create the registry namespace: `bx cr namespace-add kube101`, if it doesn't
already exist
## Running the demo scripts
- Modify the `common.sh` file to make sure your cluster and registry
namespace values are set properly
- Run each `sh` file as instructed in the [presentation](../Workshop.pptx)
- As the scripts are running press `ENTER` or `space` when it pauses
- If you press `f` it will remove the delay as it types
- If you press `r` it will remove the pauses. So, if you want to do both
press `f` before you press `r`
## Cleaning up
Run `clean.sh` to clean up the environment. It does not erase your
cluster or registry namespace.
## Automated running of scripts
To run all of the scripts in an automated way, make sure your cluster and
registry namespace are ready and then:
```
SKIP=1 DELAY=0 ./all.sh
```
`SKIP=1` tells it to not pause at each command
`DELAY=0` tells it to not print slowly, simulating typing
================================================
FILE: presentation/scripts/all.sh
================================================
#!/bin/bash
# Run every lab demo in order, then clean up.
# set -e aborts the whole run as soon as any script fails.
set -e

for lab in lab1-1 lab1-2 lab1-3 lab2-1 lab2-2; do
  "./${lab}.sh"
done
./clean.sh
================================================
FILE: presentation/scripts/clean.sh
================================================
#!/bin/bash
# Tear down the demo deployment and service. "|| true" keeps the script
# succeeding even when a resource is already gone.
source ./demoscript

comment "Cleaning up..."
for resource in deploy/guestbook svc/guestbook; do
  kubectl delete "$resource" || true
done
================================================
FILE: presentation/scripts/common.sh
================================================
#!/bin/bash
source ./demoscript

# Demo configuration: override NAMESPACE / CLUSTER_NAME in your
# environment (or change the defaults below) before running the demos.
: "${NAMESPACE:=kube101}"
: "${CLUSTER_NAME:=osscluster}"

# Fixed values shared by all of the lab scripts.
DEPLOYMENT_NAME=guestbook
IMAGE_NAME=ibmcom/guestbook
================================================
FILE: presentation/scripts/demoscript
================================================
# SAVE=1 Save output to a tar file for off-line running
# SKIP=1 Do not wait for user to press a key to continue
# RECOVER=1 Use the canned output when a command fails
# USESAVED=1 Use canned output instead of running the commands
# USESAVED=2 Use canned output IFF it exists, otherwise run it
# Base name of the running script; it names the canned-output tar.
# Fixed: the original "${0##.*/}" only strips a "./"-style prefix, so
# an absolute $0 produced a full path (and a bogus tar name);
# "${0##*/}" is the standard basename expansion.
scriptName="${0##*/}"
bold=$(tput bold)
normal=$(tput sgr0)
delay=${DELAY:-"0.02"}
skip="$SKIP"
save="$SAVE"
# Absolute path next to the script (dirname quoted against spaces).
saveTar=$(cd "$(dirname "$0")" && pwd)/${scriptName}.tar
recover="$RECOVER"
useSaved="$USESAVED"

# Remove scratch files on any exit path.
trap clean EXIT
function clean {
  rm -f out
}

# USESAVED=1 requires the canned-output tar to exist up front
# (USESAVED=2 falls back to really running commands, so it is exempt).
if [[ "${useSaved}" != "" && "${useSaved}" != "2" && ! -e "${saveTar}" ]]; then
  echo "Missing saved output file: ${saveTar}"
  exit 1
fi

# A fresh SAVE run starts from an empty tar.
if [[ "${save}" != "" && "${useSaved}" == "" ]]; then
  rm -f "${saveTar}"
fi
# Run a command under script(1) so its output still looks like a TTY
# (prevents programs from switching to non-interactive buffering).
# macOS/BSD and util-linux script(1) take different flags, hence the
# uname switch.
# NOTE(review): $* is unquoted in the Darwin branch, so arguments
# containing spaces would be re-split -- presumably callers only pass
# simple words; confirm against doit's usage.
function myscript() {
  if [[ "$(uname)" == Darwin ]]; then
    script -q -a /dev/null $*
  else
    script -efq -a /dev/null -c "$*"
  fi
}
# Print the given text in bold as if it were being typed: one character
# every $delay seconds. delay=0 emits the whole string at once.
function slowType() {
  str="$*"
  if [[ "$delay" == "0" ]]; then
    # Quoted (the original was not): runs of spaces and glob characters
    # like '*' now survive intact instead of being split/expanded.
    printf '%s' "$bold$str$normal"
    return
  fi
  # 0..len-1; the original "seq 0 ${#str}" did one extra (empty)
  # iteration and slept once more than needed.
  for i in $(seq 0 $((${#str} - 1))); do
    printf '%s' "$bold${str:$i:1}$normal"
    sleep "$delay"
  done
}
# Like slowType, but for text being fed to another program: the
# characters go to stdout (presumably piped into the program's stdin)
# while the bold-on/bold-off escapes are written to fd 3.
# NOTE(review): fd 3 must already be open -- looks like the caller is
# expected to point it at the real terminal; confirm in doit.
function slowTty() {
  str="$*"
  echo -n $bold >&3
  if [[ "$delay" == "0" ]]; then
    echo -n "$str"
  else
    for i in `seq 0 ${#str}`; do
      echo -n "${str:$i:1}"
      sleep $delay
    done
  fi
  sleep 0.2 # just to give the other program time to show its input
  echo -n $normal >&3
}
# Read one keystroke; some keys adjust playback on the fly:
#   f - fast (no typing delay), s - back to normal speed,
#   r - run: stop pausing for keypresses from here on
function readChar() {
  read -s -n 1 ch
  if [[ "$ch" == "f" ]]; then
    delay="0"
  elif [[ "$ch" == "s" ]]; then
    delay=${DELAY:-"0.02"}
  elif [[ "$ch" == "r" ]]; then
    skip="x"
  fi
}
# Wait for a keypress, unless SKIP mode is on - then just pause briefly
# so automated playback doesn't fly by.
function pause() {
  if [[ "$skip" != "" ]]; then
    sleep 0.2
  else
    readChar
  fi
}
# Sequence number used to name the saved-output files (run.0, run.1, ...)
cmdNum=0

# doit [flags] cmd args...
# The workhorse: shows the command as if it were being typed, runs it (or
# replays canned output from the tar), pages the output, and validates the
# exit code.  The command's output is also left in ./out for callers.
# Flags:
#   --ignorerc    ignore the command's exit code entirely
#   --shouldfail  the command is EXPECTED to fail; success is an error
#   --noexec      display the command but do not run it
#   --usesaved    replay canned output for this command even if USESAVED is off
#   --noscroll    never page the output (use cat)
#   --scroll      force paging even if NOSCROLL is set
#   --post=CMD    pipe the output through CMD before displaying
function doit() {
local ignorerc=""
local shouldfail=""
local noexec=""
local fakeit=${useSaved:-}
local noscroll=${NOSCROLL:-}
local postcmd=""
# Consume leading --flags; everything left is the command itself
while [[ "$1" == "--"* ]]; do
opt="$1"
shift
case "$opt" in
--ignorerc ) ignorerc="1" ;;
--shouldfail ) shouldfail="1" ;;
--noexec ) noexec="1" ;;
--usesaved ) fakeit="1" ;;
--noscroll ) noscroll="1" ;;
--scroll ) noscroll="" ;;
--post* ) postcmd="${opt#*=}" ;;
esac
done
set +e
# Fake a shell prompt, slow-type the command, and log it to ./cmds
echo -n $bold"$"$normal" "
pause
slowType $*
echo "$*" >> cmds
pause
echo
saveFile="run.${cmdNum}"
# Build the display pipeline: page with "more" sized to the terminal,
# fall back to cat when skipping/noscroll, prepend any --post filter
local lines=$(tput lines)
let lines=lines-3
moreCMD="more -$lines"
if [[ "$skip" != "" || "$noscroll" != "" ]]; then
moreCMD="cat"
fi
if [[ "$postcmd" != "" ]]; then
moreCMD="$postcmd | $moreCMD"
fi
# Unless we're told to not execute it, do it
if [[ "$noexec" == "" ]]; then
if [[ "$fakeit" != "" ]]; then
# Faking it! Try to pull this command's canned output from the tar
if tar -xf "${saveTar}" "${saveFile}" > /dev/null 2>&1; then
# echo "** Using saved output ${saveFile} **"
cp "${saveFile}" out
rm "${saveFile}"
cat out | eval ${moreCMD[@]}
# Synthesize the exit code the caller expects from replayed output
if [[ "$shouldfail" == "" ]]; then
rc=0
else
rc=1
fi
else
if [[ "$fakeit" == "2" ]]; then
# file doesn't exist so just try to run it instead
fakeit=""
else
echo -n > out
fi
fi
fi
if [[ "$fakeit" == "" ]]; then
# Run the cmd; tee keeps a copy of its output in ./out
bash -c " $* " 2>&1 | tee out | eval ${moreCMD[@]}
rc=${PIPESTATUS[0]}
# Save the output if we're asked to
if [[ "$save" != "" ]]; then
cp out "${saveFile}"
# tar --delete -f "${saveTar}" "${saveFile}" > /dev/null 2>&1 || true
tar -rf "${saveTar}" "${saveFile}"
rm "${saveFile}"
fi
fi
# If the cmd failed see if we should use the canned output
if [[ "$recover" != "" ]]; then
if [[ ( ( "$shouldfail" == "" && "$rc" != "0" ) || \
( "$shouldfail" != "" && "$rc" == "0" ) ) ]] && \
tar -xf "${saveTar}" "${saveFile}" > /dev/null 2>&1 ; then
# echo "** Using saved output ${saveFile} **"
cp "${saveFile}" out
rm "${saveFile}"
fi
fi
let cmdNum=cmdNum+1
else
# We're not really executing it, just showing the cmd
echo -n > out
rc=0
fi
echo
if [[ "$ignorerc" == "" ]]; then
# We're not totally ignoring the exit code
if [[ "$shouldfail" != "" ]]; then
# We need to make sure the command failed as expected
if [[ "$rc" == "0" ]]; then
echo "Expected non-zero exit code, got: $rc"
exit 1
fi
else
# Normal case: a zero exit code is expected
if [[ "$rc" != "0" ]]; then
echo "Non-zero exit code: $rc"
exit 1
fi
fi
fi
set -e
}
# Show a command the same way doit does (prompt + slow typing + cmds log),
# then launch it as a background job in this shell.
function background() {
  printf '%s$%s ' "$bold" "$normal"
  slowType $*
  echo "$*" >> cmds
  echo
  bash -c " $* " &
}
# ttyDoit [flags] cmd...
# Run an interactive command under script(1) and feed it input lines read
# from fd 10, slow-typing each one so it looks live.  Input line forms:
#   "run X"  - execute X in the feeder shell instead of sending it
#   "@X"     - send X immediately, without pausing before/after
# Flags: --ignorerc, --shouldfail (same meaning as in doit)
function ttyDoit() {
local ignorerc=""
local shouldfail=""
while [[ "$1" == "--"* ]]; do
opt="$1"
shift
case "$opt" in
--ignorerc ) ignorerc="1" ;;
--shouldfail ) shouldfail="1" ;;
esac
done
# Fake prompt + slow-typed command line
echo -n $bold"$"$normal" "
pause
slowType "$*"
pause
echo
# fd 3 = the real stdout; slowTty writes its bold on/off markers there
exec 3>&1
set +e
(
sleep 0.2
# Feeder subshell: read scripted input from fd 10, line by line
while read -u 10 line ; do
dontWait=""
if [[ "$line" == "run "* ]]; then
# "run ..." lines execute locally rather than being sent to the command
line=${line:4}
${line}
continue
fi
if [[ "$line" == "@"* ]]; then
# Lines starting with "@" will be executed
# immediately w/o pausing before or after showing it
dontWait="x"
line=${line:1}
fi
if [[ "$dontWait" == "" ]]; then pause ; fi
slowTty $line
if [[ "$dontWait" == "" ]]; then pause ; fi
echo
sleep 0.2
done
echo
) | myscript $*
# Exit status of the command run under script(1), not of the feeder
rc=${PIPESTATUS[1]}
echo -n $normal
echo
[[ "$ignorerc" == "" && "$rc" != "0" ]] && echo "Non-zero exit code" && exit 1
[[ "$shouldfail" != "" && "$rc" == "0" ]] && echo "Expected non-zero exit code" && exit 1
set -e
}
# Print a bold "# ..." narration line for the audience.
# Options:
#   --nolf        no trailing blank line
#   --nocr        no trailing newline at all
#   --pause       wait for a key before and after showing the text
#   --pauseafter  wait for a key only after showing the text
#   --nohash      omit the leading "# "
function comment() {
  local trailLF="\\n"
  local trailCR="${trailLF}"
  local wantPause=""
  local wantPauseAfter=""
  local noHash=""
  while [[ "$1" == "--"* ]]; do
    local flag="$1"
    shift
    case "$flag" in
      --nolf ) trailLF="" ;;
      --nocr ) trailCR="" ; trailLF="" ;;
      --pause ) wantPause="1" ; wantPauseAfter="1" ;;
      --pauseafter ) wantPauseAfter="1" ;;
      --nohash ) noHash="1" ;;
    esac
  done
  [[ "$noHash" == "" ]] && echo -en $bold\#" "$normal
  [[ "$wantPause" == "1" ]] && pause
  echo -en "$bold$*$normal"
  [[ "$wantPause" == "1" || "$wantPauseAfter" == "1" ]] && pause
  echo -en "${trailCR}${trailLF}"
}
# Poll once a second until the given command succeeds - or, with a
# leading "!", until it stops succeeding.  No-op when replaying saved
# output.  NOTE: this intentionally shadows the shell builtin "wait".
function wait() {
  # set -x
  if [[ "${useSaved}" != "" ]]; then
    return
  fi
  if [ "$1" == "!" ]; then
    shift
    # wait for the command to start failing
    until ! bash -c " $* " &> /dev/null; do
      sleep 1
    done
  else
    # wait for the command to start succeeding
    until bash -c " $* " &> /dev/null; do
      sleep 1
    done
  fi
  # set +x
}
# Display a file: page it with "more" when running interactively
# (SKIP unset), otherwise just cat it straight through.
function scroll() {
  local viewLines
  viewLines=$(tput lines)
  let viewLines=viewLines-3
  echo -n $bold"$"$normal" "
  pause
  # Pick the command we pretend to type and the one we actually run
  local shower="more"
  local pager="more -$viewLines"
  if [[ "$skip" != "" ]]; then
    shower="cat"
    pager="cat"
  fi
  slowType $shower $*
  pause
  echo
  $pager $*
  echo
}
================================================
FILE: presentation/scripts/lab1-1.sh
================================================
#!/bin/bash
# Lab 1 part 1: pull the guestbook image and smoke-test it locally with
# Docker before deploying it to Kubernetes.
source ./common.sh
comment "Pull the first version of our docker image"
DIR=$(pwd)
cd ../../workshop/Lab1
doit docker pull ${IMAGE_NAME}:v1
cd $DIR
# We're going to test the image locally with Docker before we run it on Kube.
# This is to allow us to see what the output looks like in advance.
# Notice we're mapping port 3000 in the container to 32768 on the host.
comment "First run/test locally"
doit docker run -itd -p 32768:3000 ${IMAGE_NAME}:v1
CID=$(cat out) # Save the container ID ("out" holds the last doit output)
# Now that our image is running, we will use curl to access the content
comment "Test it"
doit curl -s localhost:32768/hello
comment "Clean up container"
doit docker rm -f ${CID}
comment --pauseafter "*** End of "$(basename $0)
================================================
FILE: presentation/scripts/lab1-2.sh
================================================
#!/bin/bash
# Lab 1 part 2: deploy the guestbook image to the Kubernetes cluster and
# watch the deployment/pod come up.
source ./common.sh
comment "Deploy our app on kubernetes, using the image in our registry"
doit kubectl run ${DEPLOYMENT_NAME} --image=${IMAGE_NAME}:v1
comment "The result of our run command is a deployment."
doit kubectl get deployment ${DEPLOYMENT_NAME}
comment --nolf "Notice the desired & current states."
comment --nolf "Kubernetes is reconciling to achieve our objective"
# Pods created by `kubectl run` carry the label run=<name>
comment "The actual unit of work is running in a pod"
doit kubectl get pods -l run=guestbook
comment "We can see that it is ready and running"
comment --pauseafter "*** End of "$(basename $0)
================================================
FILE: presentation/scripts/lab1-3.sh
================================================
#!/bin/bash
# Lab 1 part 3: expose the deployment via a NodePort service and curl it
# from outside the cluster.
source ./common.sh
comment --nolf "Our app is running in our kubernetes cluster, but it is not reachable"
comment "We need to EXPOSE it before it is useful."
doit kubectl expose deployment ${DEPLOYMENT_NAME} --type="NodePort" --port=3000
comment --nolf "Now that it is exposed, curl it like we did before with docker"
comment "But first, get address of worker node"
doit bx cs workers ${CLUSTER_NAME} --json
# Extract the worker's public IP from the JSON that doit left in "out"
WORKER_IP=$(cat out | grep publicIP | sed "s/.*\"\([0-9].*\)\".*/\1/g" )
# In that output, we can see the public IP.
# Next we need the node port assignment of the application on the cluster.
# It was automatically assigned by the kubernetes runtime.
comment --pauseafter "Notice the 'publicIP' field"
comment "Get the nodePort of the service"
doit kubectl get svc guestbook -ojson
# Extract the auto-assigned nodePort from the service JSON in "out"
SERVICE_PORT=$(cat out | grep nodePort | sed "s/.*: *\([0-9]*\).*/\1/g")
comment --pauseafter "Notice the 'nodePort' field"
comment "Curl it..."
doit curl -s ${WORKER_IP}:${SERVICE_PORT}/hello
comment --pauseafter "*** End of "$(basename $0)
================================================
FILE: presentation/scripts/lab2-1.sh
================================================
#!/bin/bash
# Lab 2 part 1: scale the guestbook deployment to a random replica count
# while the audience watches the service with curl.
source ./common.sh
# Recompute the service endpoint (same extractions as lab1-3.sh)
SERVICE_PORT=$(kubectl get svc guestbook -ojson | grep nodePort | sed "s/.*: *\([0-9]*\).*/\1/g")
WORKER_IP=$(bx cs workers ${CLUSTER_NAME} --json | grep publicIP | sed "s/.*\"\([0-9].*\)\".*/\1/g" )
GUESTBOOK_CURL=${WORKER_IP}:${SERVICE_PORT}/hello
# make sure the service works
comment "We're starting with the same output as the previous lab"
doit curl -s ${GUESTBOOK_CURL}
comment we have the pods
doit kubectl get pods -l run=guestbook
# these pods come from the deployment
# kubectl get deployment ${DEPLOYMENT_NAME}
comment --nolf "You should start this watch in a separate terminal"
comment " watch -d -n 0.2 curl -s ${GUESTBOOK_CURL}"
comment --pauseafter "Press ENTER when ready"
# Pick a random replica count between 2 and 6
replicas=$(( ( RANDOM % 5 ) + 2 ))
comment --nocr --nohash --pauseafter "How many replicas? "
comment --nohash $replicas
# use `kubectl scale` to resize the deployment
doit kubectl scale deployment ${DEPLOYMENT_NAME} --replicas ${replicas}
# show the pods
doit kubectl get pods -l run=guestbook
comment --pauseafter "*** End of "$(basename $0)
================================================
FILE: presentation/scripts/lab2-2.sh
================================================
#!/bin/bash
# Lab 2 part 2: roll out a new image version with `kubectl set image`.
source ./common.sh

# `kubectl set image` to change the underlying image to our v2 version
comment "Update the deployment with the new image"
doit kubectl set image deployment ${DEPLOYMENT_NAME} guestbook=${IMAGE_NAME}:v2
doit kubectl describe deployment ${DEPLOYMENT_NAME}

# "out" holds the output of the last doit; pull the Image: line from it
line=$(grep Image out)
# Fixed typo in the on-screen message: "where is shows" -> "where it shows"
comment --nolf "Notice where it shows:"
comment "$line"

comment --pauseafter "*** End of "$(basename $0)
gitextract_lgonfye7/
├── .gitbook.yaml
├── .github/
│ └── workflows/
│ └── ci.yml
├── .markdownlint.json
├── .travis.yml
├── .verify-links.sh
├── LICENSE
├── README.md
├── demo/
│ └── .keep
├── docs/
│ ├── CONTRIBUTING.md
│ ├── Lab0/
│ │ └── README.md
│ ├── Lab1/
│ │ ├── README.md
│ │ └── script/
│ │ └── script.md
│ ├── Lab2/
│ │ └── README.md
│ ├── Lab3/
│ │ └── README.md
│ ├── Lab4/
│ │ ├── README.md
│ │ └── healthcheck.yml
│ ├── LabD/
│ │ └── README.md
│ ├── MAINTAINERS.md
│ ├── README.md
│ ├── SUMMARY.md
│ ├── bx_login.sh
│ ├── deploy.sh
│ ├── deploy_rollup.sh
│ ├── install_bx.sh
│ ├── slides/
│ │ └── workshop.pptx
│ ├── test_yml.sh
│ └── workshop.pptx
├── mkdocs.yml
└── presentation/
├── .keep
├── IntroductionToKube.pptx
├── Workshop.pptx
└── scripts/
├── README.md
├── all.sh
├── clean.sh
├── common.sh
├── demoscript
├── lab1-1.sh
├── lab1-2.sh
├── lab1-3.sh
├── lab2-1.sh
└── lab2-2.sh
Condensed preview — 41 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (111K chars).
[
{
"path": ".gitbook.yaml",
"chars": 145,
"preview": "# Do not edit this file, to adjust the table of contents, modify SUMMARY.md\n\nroot: ./docs/\n\nstructure:\n readme: README."
},
{
"path": ".github/workflows/ci.yml",
"chars": 312,
"preview": "name: ci\non:\n push:\n branches:\n - main\n - master\njobs:\n deploy:\n runs-on: ubuntu-latest\n steps:\n "
},
{
"path": ".markdownlint.json",
"chars": 88,
"preview": "{\n \"line-length\": false,\n \"MD014\": false,\n \"MD033\": false,\n \"MD026\": false\n}"
},
{
"path": ".travis.yml",
"chars": 193,
"preview": "---\nlanguage: node_js\nnode_js: 10\n\nbefore_script:\n - npm install markdownlint-cli\nscript:\n - markdownlint -c .markdown"
},
{
"path": ".verify-links.sh",
"chars": 8167,
"preview": "#!/bin/bash\n\n# Copyright 2017 The Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may"
},
{
"path": "LICENSE",
"chars": 10751,
"preview": "\n Apache License\n Version 2.0, January 2004\n "
},
{
"path": "README.md",
"chars": 839,
"preview": "# Introduction to Kubernetes\n\n[](https://travis-ci.o"
},
{
"path": "demo/.keep",
"chars": 0,
"preview": ""
},
{
"path": "docs/CONTRIBUTING.md",
"chars": 1679,
"preview": "# Contributing In General\n\nOur project welcomes external contributions! If you have an itch, please feel free to scratch"
},
{
"path": "docs/Lab0/README.md",
"chars": 5723,
"preview": "# Lab 0. Access a Kubernetes cluster\n\n## Set up your kubernetes environment\n\nFor the hands-on labs in this tutorial repo"
},
{
"path": "docs/Lab1/README.md",
"chars": 3410,
"preview": "# Lab 1. Deploy your first application\n\nLearn how to deploy an application to a Kubernetes cluster hosted within\nthe IBM"
},
{
"path": "docs/Lab1/script/script.md",
"chars": 5087,
"preview": "\n# Pod\n\nIn Kubernetes, a group of one or more containers is called a pod. Containers in a pod are deployed together, and"
},
{
"path": "docs/Lab2/README.md",
"chars": 9285,
"preview": "# Lab 2: Scale and Update Deployments\n\nIn this lab, you'll learn how to update the number of instances\na deployment has "
},
{
"path": "docs/Lab3/README.md",
"chars": 13237,
"preview": "# Lab 3: Scale and update apps natively, building multi-tier applications\n\nIn this lab you'll learn how to deploy the sa"
},
{
"path": "docs/Lab4/README.md",
"chars": 4453,
"preview": "# ***UNDER CONSTRUCTION***\n\n## 1. Check the health of apps\n\nKubernetes uses availability checks (liveness probes) to kno"
},
{
"path": "docs/Lab4/healthcheck.yml",
"chars": 750,
"preview": "apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: hw-demo-deployment\nspec:\n replicas: 3\n template:\n metad"
},
{
"path": "docs/LabD/README.md",
"chars": 1784,
"preview": "# Optional Debugging Lab - Tips and Tricks for Debugging Applications in Kubernetes\n\nAdvanced debugging techniques to re"
},
{
"path": "docs/MAINTAINERS.md",
"chars": 3286,
"preview": "# Maintainers Guide\n\nThis guide is intended for maintainers - anybody with commit access to one or more Developer Techno"
},
{
"path": "docs/README.md",
"chars": 14862,
"preview": "# IBM Cloud Kubernetes Service Lab\n\n<img src=\"https://kubernetes.io/images/favicon.png\" width=\"200\">\n\n## An introduction"
},
{
"path": "docs/SUMMARY.md",
"chars": 625,
"preview": "# Summary\n\n<!-- Rules of SUMMARY.md are here: https://docs.gitbook.com/integrations/github/content-configuration#summary"
},
{
"path": "docs/bx_login.sh",
"chars": 794,
"preview": "#!/bin/sh\n\nif [ -z $CF_ORG ]; then\n CF_ORG=\"$BLUEMIX_ORG\"\nfi\nif [ -z $CF_SPACE ]; then\n CF_SPACE=\"$BLUEMIX_SPACE\"\nfi\n\n"
},
{
"path": "docs/deploy.sh",
"chars": 2794,
"preview": "#!/bin/bash\n\necho \"Create Demo Application\"\n\nIP_ADDR=$(bx cs workers $CLUSTER_NAME | grep normal | awk '{ print $2 }')\ni"
},
{
"path": "docs/deploy_rollup.sh",
"chars": 657,
"preview": "#!/bin/bash\necho \"Install IBM Cloud CLI\"\n. workshop/install_bx.sh\nif [ $? -ne 0 ]; then\n echo \"Failed to install IBM Cl"
},
{
"path": "docs/install_bx.sh",
"chars": 1019,
"preview": "#!/bin/bash\n\necho \"Download IBM Cloud CLI\"\nwget --quiet --output-document=/tmp/Bluemix_CLI_amd64.tar.gz http://public.d"
},
{
"path": "docs/test_yml.sh",
"chars": 323,
"preview": "#!/bin/bash\necho \"Testing YAML files for <namespace>\"\nls */*.yml\nimageLines=`grep image: */*.yml`\nnamespaceLines=`grep \\"
},
{
"path": "mkdocs.yml",
"chars": 2229,
"preview": "# Project information\nsite_name: Kube 101 Workshop\nsite_url: https://ibm.github.io/kube101\nsite_author: IBM Developer\n\n#"
},
{
"path": "presentation/.keep",
"chars": 0,
"preview": ""
},
{
"path": "presentation/scripts/README.md",
"chars": 1433,
"preview": "# Instructions for Running the Presentation Demo Scripts\n\n## The following are needed:\n- `bx` executable\n- `bx cs` plugi"
},
{
"path": "presentation/scripts/all.sh",
"chars": 92,
"preview": "#!/bin/bash\nset -e\n\n./lab1-1.sh\n./lab1-2.sh\n./lab1-3.sh\n./lab2-1.sh\n./lab2-2.sh\n\n./clean.sh\n"
},
{
"path": "presentation/scripts/clean.sh",
"chars": 137,
"preview": "#!/bin/bash\n\nsource ./demoscript\n\ncomment \"Cleaning up...\"\n\nkubectl delete deploy/guestbook || true\nkubectl delete svc/g"
},
{
"path": "presentation/scripts/common.sh",
"chars": 318,
"preview": "#!/bin/bash\n\nsource ./demoscript\n\n# These need to be changed by the person running the demo, or set them\n# in your envir"
},
{
"path": "presentation/scripts/demoscript",
"chars": 6740,
"preview": "# SAVE=1 Save output to a tar file for off-line running\n# SKIP=1 Do not wait for user to press a key to contin"
},
{
"path": "presentation/scripts/lab1-1.sh",
"chars": 738,
"preview": "#!/bin/bash\n\nsource ./common.sh\n\ncomment \"Pull the first version of our docker image\"\nDIR=$(pwd)\ncd ../../workshop/Lab1\n"
},
{
"path": "presentation/scripts/lab1-2.sh",
"chars": 590,
"preview": "#!/bin/bash\n\nsource ./common.sh\n\ncomment \"Deploy our app on kubernetes, using the image in our registry\"\ndoit kubectl ru"
},
{
"path": "presentation/scripts/lab1-3.sh",
"chars": 1054,
"preview": "#!/bin/bash\n\nsource ./common.sh\n\ncomment --nolf \"Our app is running in our kubernetes cluster, but it is not reachable\"\n"
},
{
"path": "presentation/scripts/lab2-1.sh",
"chars": 1056,
"preview": "#!/bin/bash\n\nsource ./common.sh\n\nSERVICE_PORT=$(kubectl get svc guestbook -ojson | grep nodePort | sed \"s/.*: *\\([0-9]*\\"
},
{
"path": "presentation/scripts/lab2-2.sh",
"chars": 417,
"preview": "#!/bin/bash\n\nsource ./common.sh\n\n# `kubectl set image` to change the underlying image to our v2 version\ncomment \"Update "
}
]
// ... and 4 more files (download for full content)
About this extraction
This page contains the full source code of the IBM/kube101 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 41 files (102.6 KB), approximately 27.0k tokens. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.