[
  {
    "path": ".gitbook.yaml",
    "content": "# Do not edit this file, to adjust the table of contents, modify SUMMARY.md\n\nroot: ./docs/\n\nstructure:\n  readme: README.md\n  summary: SUMMARY.md\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: ci\non:\n  push:\n    branches:\n      - main\n      - master\njobs:\n  deploy:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v2\n      - uses: actions/setup-python@v2\n        with:\n          python-version: 3.x\n      - run: pip install mkdocs-material\n      - run: mkdocs gh-deploy --force\n"
  },
  {
    "path": ".markdownlint.json",
    "content": "{\n    \"line-length\": false,\n    \"MD014\": false,\n    \"MD033\": false,\n    \"MD026\": false\n}"
  },
  {
    "path": ".travis.yml",
    "content": "---\nlanguage: node_js\nnode_js: 10\n\nbefore_script:\n  - npm install markdownlint-cli\nscript:\n  - markdownlint -c .markdownlint.json docs --ignore docs/SUMMARY.md\n  # - ./.verify-links.sh -v docs\n"
  },
  {
    "path": ".verify-links.sh",
    "content": "#!/bin/bash\n\n# Copyright 2017 The Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This script will scan all md (markdown) files for bad references.\n# It will look for strings of the form [...](...) and make sure that\n# the (...) points to either a valid file in the source tree or, in the\n# case of it being an http url, it'll make sure we don't get a 404.\n#\n# Usage: verify-links.sh [ dir | file ... ]\n# default arg is root of our source tree\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nverbose=\"\"\ndebugFlag=\"\"\nmaxRetries=\"1\"\nstop=\"\"\ntmp=/tmp/out${RANDOM}\n\ntrap clean EXIT\nseenFiles=( \":\" )   # just to prevent \"undefined\" errors\n\n# findPrevious will search for a file to see if we've seen it before.\n# If we have then return the matching \"anchorFile\". 
If we haven't\n# seen it then add it to \"seenFiles\" and create a new \"anchorFile\".\n# $1 == search file\n# Note we can't use a map because bash on a mac doesn't support it.\nfoundAnchor=\"\"\nfunction findPreviousFile() {\n  for f in \"${seenFiles[@]}\" ; do\n    orig=${f%%:*}\n\tif [[ \"${orig}\" == \"$1\" ]]; then\n\t  foundAnchor=${f#*:}\n\t  return 0\n\tfi\n  done\n\n  # Didn't find it, so create a new anchorFile and save it for next time\n  foundAnchor=\"${tmp}-anchors-${RANDOM}-${RANDOM}\"\n  seenFiles+=(\"$1:${foundAnchor}\")\n  return 1\n}\n\nfunction debug {\n  if [[ \"$debugFlag\" != \"\" ]]; then\n    (>&2 echo $*)\n  fi\n}\n\nfunction clean {\n  rm -f ${tmp}*\n}\n\nwhile [[ \"$#\" != \"0\" && \"$1\" == \"-\"* ]]; do\n  opts=\"${1:1}\"\n  while [[ \"$opts\" != \"\" ]]; do\n    case \"${opts:0:1}\" in\n      v) verbose=\"1\" ;;\n      d) debugFlag=\"1\" ; verbose=\"1\" ;;\n      t) maxRetries=\"5\" ;;\n      -) stop=\"1\" ;;\n      ?) echo \"Usage: $0 [OPTION]... [DIR|FILE]...\"\n         echo \"Verify all links in markdown files.\"\n         echo\n         echo \"  -v   show each file as it is checked\"\n         echo \"  -d   show each href as it is found\"\n         echo \"  -t   retry GETs to http(s) URLs 5 times\"\n         echo \"  -?   
show this help text\"\n         echo \"  --   treat remainder of args as dir/files\"\n         exit 0 ;;\n      *) echo \"Unknown option '${opts:0:1}'\"\n         exit 1 ;;\n    esac\n    opts=\"${opts:1}\"\n  done\n  shift\n  if [[ \"$stop\" == \"1\" ]]; then\n    break\n  fi\ndone\n\n# echo verbose:$verbose\n# echo debugFlag:$debugFlag\n# echo args:$*\n\narg=\"\"\n\nif [ \"$*\" == \"\" ]; then\n  arg=\".\"\nfi\n\nmdFiles=$(find $* $arg -name \"*.md\" | sort | grep -v vendor | grep -v glide)\n\nclean\nfor file in ${mdFiles}; do\n  # echo scanning $file\n  dir=$(dirname $file)\n\n  [[ -n \"$verbose\" ]] && echo \"> $file\"\n\n  # Replace ) with )\\n so that each possible href is on its own line.\n  # Then only grab lines that have [..](..) in them - put results in tmp file.\n  # If the file doesn't have any lines with [..](..) then skip this file\n  # Steps:\n  #  tr   - convert all \\n to a space since newlines shouldn't change anything\n  #  sed  - add a \\n after each ) since ) ends what we're looking for.\n  #         This makes it so that each href is on a line by itself\n  #  sed  - prefix each line with a space so the grep can do [^\\\\]\n  #  grep - find all lines that match [...](...)\n  cat $file | \\\n    tr '\\n' ' ' | \\\n    sed \"s/)/)\\n/g\" | \\\n    sed \"s/^/ /g\" | \\\n    grep \"[^\\\\]\\[.*\\](.*)\" > ${tmp}1 || continue\n\n  # This sed will extract the href portion of the [..](..) - meaning\n  # the stuff in the parens.\n  sed \"s/.*\\[*\\]\\([^()]*\\)/\\1/\" < ${tmp}1 > ${tmp}2  || continue\n\n  cat ${tmp}2 | while read line ; do\n    # Strip off the leading and trailing parens, and then spaces\n    ref=${line#*(}\n    ref=${ref%)*}\n    ref=$(echo $ref | sed \"s/ *//\" | sed \"s/ *$//\")\n\n    # Show all hrefs - mainly for verifying in our tests\n    debug \"Checking: '$ref'\"\n\n    # An external href (ie. 
starts with http)\n    if [ \"${ref:0:4}\" == \"http\" ]; then\n      try=0\n      while true ; do\n        if curl -f -s -k --connect-timeout 10 ${ref} > /dev/null 2>&1 ; then\n          break\n        fi\n        let try=try+1\n        if [ ${try} -eq ${maxRetries} ]; then\n          extra=\"\"\n          if [ ${try} -gt 1 ]; then\n            extra=\"(tried ${try} times) \"\n          fi\n          echo $file: Can\\'t load url: ${ref} ${extra} | tee -a ${tmp}3\n          break\n        fi\n        sleep 1\n      done\n      continue\n    fi\n\n    # Skip \"mailto:\" refs\n    if [ \"${ref:0:7}\" == \"mailto:\" ]; then\n      continue\n    fi\n\n    # Local file link (i.e. ref contains a #)\n    if [[ \"${ref/\\#}\" != \"${ref}\" ]]; then\n\n      # If ref doesn't start with \"#\" then update filepath\n      if [ \"${ref:0:1}\" != \"#\" ]; then\n        # Split ref into filepath and the section link\n        reffile=$(echo ${ref} | awk -F\"#\" '{print $1}')\n        fullpath=${dir}/${reffile}\n        ref=$(echo ${ref} | awk -F\"#\" '{$1=\"\"; print $0}')\n      else\n        fullpath=${file}\n        ref=${ref:1}\n      fi\n\n      if [[ ! 
-e \"${fullpath}\" ]]; then\n        echo \"$file: Can't find referenced file '${fullpath}'\" | \\\n          tee -a ${tmp}3\n        continue\n      fi\n\n      # Remove leading and trailing spaces\n      ref=$(echo ${ref} | sed 's/^[[:space:]]*//' | sed 's/[[:space:]]*$//')\n\n      # If we've seen this file before then grab its processed tmp file\n\t  if findPreviousFile \"${fullpath}\" ; then\n        anchorFile=\"${foundAnchor}\"\n      else\n        anchorFile=\"${foundAnchor}\"\n\n        # Search file for sections\n        used=\"\" # anchors used, seen+twiddled ones\n\n        # Find all section headers in the file.\n        # Remove leading & trailing spaces.\n        # Lower case it.\n        # Convert spaces to \"-\".\n        # Drop all non alphanumeric chars.\n        # Twiddle section anchor if we've seen it before.\n        grep \"^[[:space:]]*#\" < ${fullpath} | \\\n          sed 's/[[:space:]]*##*[[:space:]]*//' | \\\n          sed 's/[[:space:]]*$//' | \\\n          tr '[:upper:]' '[:lower:]' | \\\n          sed \"s/  */-/g\" | \\\n          sed \"s/[^-a-zA-Z0-9]//g\" | while read section ; do\n            # If we haven't used this exact anchor before just use it now\n            if [[ \"${used}\" != *\" ${section} \"* ]]; then\n              anchor=${section}\n            else\n              # We've used this anchor before so add \"-#\" to the end.\n              # Keep adding 1 to \"#\" until we find a free spot.\n              let num=1\n              while true; do\n                anchor=\"${section}-${num}\"\n                if [[ \"${used}\" != *\" ${anchor} \"* ]]; then\n                  break\n                fi\n                let num+=1\n              done\n            fi\n\n            echo \"${anchor}\"\n            used=\"${used} ${anchor} \"\n\n            debug \"Mapped section '${section}' to '${anchor}'\"\n\n          done > ${anchorFile} || true\n\n        # Add sections of the form <a name=\"xxx\">\n        grep \"<a name=\" 
<${fullpath} | \\\n          sed 's/<a name=\"/\\n<a name=\"/g' | \\\n          sed 's/^.*<a name=\"\\(.*\\)\">.*$/\\1/' | \\\n          sort | uniq >> ${anchorFile} || true\n\n        # echo sections ; cat ${tmp}sections1\n      fi\n\n      # Skip refs of the form #L<num> and assume its pointing to a line\n      # number of a file and those don't have anchors\n      if [[ \"${ref}\" =~ ^L([0-9])+$ ]]; then\n        continue\n      fi\n\n      # Finally, look for the ref in the list of sections/anchors\n      debug \"Anchor file(${fullpath}): ${anchorFile}\"\n      if ! grep \"^${ref}$\" ${anchorFile} > /dev/null 2>&1 ; then\n        echo $file: Can\\'t find section \\'\\#${ref}\\' in ${fullpath} | \\\n          tee -a ${tmp}3\n      fi\n\n      continue\n\n    fi\n\n    newPath=${dir}/${ref}\n\n    # And finally make sure the file is there\n    # debug line: echo ref: $ref \"->\" $newPath\n    if [[ ! -e \"${newPath}\" ]]; then\n      echo $file: Can\\'t find: ${newPath} | tee -a ${tmp}3\n    fi\n\n  done\ndone\n\nif [ -s ${tmp}3 ]; then exit 1 ; fi\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        https://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. 
However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   Copyright 2018 IBM\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       https://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# Introduction to Kubernetes\n\n[![Build Status](https://travis-ci.org/IBM/kube101.svg?branch=master)](https://travis-ci.org/IBM/kube101)\n\nThis repository contains introductory material for Kubernetes.\n\nThere is a presenter-run [meetup], including [automated scripts] for running a demonstration of Kubernetes as provided by IBM Cloud Kubernetes Service. This should take from 45 minutes to an hour and a half based on the style of the presenter as well as audience participation.\n\nThere is a self-guided or host-guided [workshop], with detailed explanations about the principles of operating an application in a Kubernetes environment. Including some time for setup, this should take between 2-4 hours.\n\n[meetup]: ./presentation/IntroductionToKube.pptx\n[automated scripts]: ./presentation/scripts\n[workshop]: https://ibm.github.io/kube101/\n"
  },
  {
    "path": "demo/.keep",
    "content": ""
  },
  {
    "path": "docs/CONTRIBUTING.md",
    "content": "# Contributing In General\n\nOur project welcomes external contributions! If you have an itch, please feel free to scratch it.\n\nTo contribute code or documentation, please submit a pull request to the [GitHub repository](https://github.com/IBM/kube101).\n\nA good way to familiarize yourself with the codebase and contribution process is to look for and tackle low-hanging fruit in the [issue tracker](https://github.com/IBM/kube101/issues). Before embarking on a more ambitious contribution, please quickly get in touch with us via an issue.\n\n**We appreciate your effort, and want to avoid a situation where a contribution requires extensive rework (by you or by us), sits in the queue for a long time, or cannot be accepted at all!**\n\n## Proposing new features\n\nIf you would like to implement a new feature, please [raise an issue](https://github.com/IBM/kube101/issues) before sending a pull request so the feature can be discussed.\nThis is to avoid you spending your valuable time working on a feature that the project developers are not willing to accept into the code base.\n\n## Fixing bugs\n\nIf you would like to fix a bug, please [raise an issue](https://github.com/IBM/kube101/issues) before sending a pull request so it can be discussed.\nIf the fix is trivial or non controversial then this is not usually necessary.\n\n## Merge approval\n\nThe project maintainers use LGTM (Looks Good To Me) in comments on the code review to\nindicate acceptance. A change requires LGTMs from two of the maintainers of each\ncomponent affected. Note that if your initial push does not pass TravisCI your change will not be approved.\n\nFor more details, see the [MAINTAINERS](MAINTAINERS.md) page.\n"
  },
  {
    "path": "docs/Lab0/README.md",
    "content": "# Lab 0. Access a Kubernetes cluster\n\n## Set up your kubernetes environment\n\nFor the hands-on labs in this tutorial repository, you will need a kubernetes cluster. One option for creating a cluster is to make use of the Kubernetes as-a-service from the IBM Cloud Kubernetes Service as outlined below.\n\n### Use the IBM Cloud Kubernetes Service\n\nYou will need either a paid IBM Cloud account or an IBM Cloud account which is a Trial account (not a Lite account). If you have one of these accounts, use the [Getting Started Guide](https://cloud.ibm.com/docs/containers?topic=containers-getting-started) to create your cluster.\n\n### Use a hosted trial environment\n\nThere are a few services that are accessible over the Internet for temporary use. As these are free services, they can sometimes experience periods of limited availablity/quality. On the other hand, they can be a quick way to get started!\n\n* [Play with Kubernetes](https://labs.play-with-k8s.com/) After signing in with your github or docker hub id, click on **Start**, then **Add New Instance** and follow steps shown in terminal to spin up the cluster and add workers.\n\n### Set up on your own workstation\n\nIf you would like to configure kubernetes to run on your local workstation for non-production, learning use, there are several options.\n\n* [Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) This solution requires the installation of a supported VM provider (KVM, VirtualBox, HyperKit, Hyper-V - depending on platform)\n\n* [Kubernetes in Docker (kind)](https://kind.sigs.k8s.io/) Runs a kubernetes cluster on Docker containers\n\n* [Docker Desktop (Mac)](https://docs.docker.com/docker-for-mac/kubernetes/) [Docker Desktop (Windows)](https://docs.docker.com/docker-for-windows/kubernetes/) Docker Desktop includes a kubernetes environment\n\n* [Microk8s](https://microk8s.io/docs/) Installable kubernetes packaged as an Ubuntu `snap` image.\n\n## Install the IBM Cloud 
command-line interface\n\n1. As a prerequisite for the IBM Cloud Kubernetes Service plug-in, install the [IBM Cloud command-line interface](https://clis.ng.bluemix.net/ui/home.html). Once installed, you can access IBM Cloud from your command-line with the prefix `bx`.\n2. Log in to the IBM Cloud CLI: `ibmcloud login`.\n3. Enter your IBM Cloud credentials when prompted.\n\n   **Note:** If you have a federated ID, use `ibmcloud login --sso` to log in to the IBM Cloud CLI. Enter your user name, and use the provided URL in your CLI output to retrieve your one-time passcode. You know you have a federated ID when the login fails without the `--sso` and succeeds with the `--sso` option.\n\n## Install the IBM Cloud Kubernetes Service plug-in\n\n1. To create Kubernetes clusters and manage worker nodes, install the IBM Cloud Kubernetes Service plug-in:\n\n   ```bash\n   ibmcloud plugin install container-service -r Bluemix\n   ```\n\n   **Note:** The prefix for running commands by using the IBM Cloud Kubernetes Service plug-in is `bx cs`.\n\n2. To verify that the plug-in is installed properly, run the following command:\n\n   ```bash\n   ibmcloud plugin list\n   ```\n\n   The IBM Cloud Kubernetes Service plug-in is displayed in the results as `container-service`.\n\n## Download the Kubernetes CLI\n\nTo view a local version of the Kubernetes dashboard and to deploy apps into your clusters, you will need to install the Kubernetes CLI that corresponds with your operating system:\n\n* [OS X](https://storage.googleapis.com/kubernetes-release/release/v1.10.8/bin/darwin/amd64/kubectl)\n* [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.10.8/bin/linux/amd64/kubectl)\n* [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.10.8/bin/windows/amd64/kubectl.exe)\n\n**For Windows users:** Install the Kubernetes CLI in the same directory as the IBM Cloud CLI. 
This setup saves you some filepath changes when you run commands later.\n\n**For OS X and Linux users:**\n\n1. Move the executable file to the `/usr/local/bin` directory using the command `mv /<path_to_file>/kubectl /usr/local/bin/kubectl` .\n\n1. Make sure that `/usr/local/bin` is listed in your PATH system variable.\n\n   ```shell\n   $ echo $PATH\n   /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin\n   ```\n\n1. Convert the binary file to an executable: `chmod +x /usr/local/bin/kubectl`\n\n## Configure Kubectl to point to IBM Cloud Kubernetes Service\n\n1. List the clusters in your account:\n\n   ```shell\n   ibmcloud ks clusters\n   ```\n\n1. Set an environment variable that will be used in subsequent commands in this lab.\n\n   ```shell\n   export CLUSTER_NAME=<your_cluster_name>\n   ```\n\n1. Configure `kubectl` to point to your cluster\n\n   ```shell\n   ibmcloud ks cluster config --cluster $CLUSTER_NAME\n   ```\n\n1. Validate proper configuration\n\n   ```shell\n   kubectl get namespace\n   ```\n\n1. You should see output similar to the following, if so, then you're ready to continue.\n\n   ```shell\n   NAME              STATUS   AGE\n   default           Active   125m\n   ibm-cert-store    Active   121m\n   ibm-system        Active   124m\n   kube-node-lease   Active   125m\n   kube-public       Active   125m\n   kube-system       Active   125m\n   ```\n\n## Download the Workshop Source Code\n\nRepo `guestbook` has the application that we'll be deploying.\nWhile we're not going to build it we will use the deployment configuration files from that repo.\nGuestbook application has two versions v1 and v2 which we will use to demonstrate some rollout\nfunctionality later. All the configuration files we use are under the directory guestbook/v1.\n\nRepo `kube101` contains the step by step instructions to run the workshop.\n\n```shell\ngit clone https://github.com/IBM/guestbook.git\ngit clone https://github.com/IBM/kube101.git\n```\n"
  },
  {
    "path": "docs/Lab1/README.md",
    "content": "# Lab 1. Deploy your first application\n\nLearn how to deploy an application to a Kubernetes cluster hosted within\nthe IBM Container Service.\n\n## 0. Prerequisites\n\nMake sure you satisfy the prerequisites as outlined in [Lab 0](../Lab0/README.md)\n\n## 1. Deploy the guestbook application\n\nIn this part of the lab we will deploy an application called `guestbook`\nthat has already been built and uploaded to DockerHub under the name\n`ibmcom/guestbook:v1`.\n\n1. Start by running `guestbook`:\n\n   ```shell\n   kubectl create deployment guestbook --image=ibmcom/guestbook:v1\n   ```\n\n   This action will take a bit of time. To check the status of the running application,\n   you can use `$ kubectl get pods`.\n\n   You should see output similar to the following:\n\n   ```shell\n   kubectl get pods\n   ```\n\n   Eventually, the status should show up as `Running`.\n\n   ```shell\n   $ kubectl get pods\n   NAME                          READY     STATUS              RESTARTS   AGE\n   guestbook-59bd679fdc-bxdg7    1/1       Running             0          1m\n   ```\n\n   The end result of the run command is not just the pod containing our application containers,\n   but a Deployment resource that manages the lifecycle of those pods.\n\n1. Once the status reads `Running`, we need to expose that deployment as a\n   service so we can access it through the IP of the worker nodes.\n   The `guestbook` application listens on port 3000.  Run:\n\n   ```shell\n   kubectl expose deployment guestbook --type=\"NodePort\" --port=3000\n   ```\n\n1. To find the port used on that worker node, examine your new service:\n\n   ```shell\n   $ kubectl get service guestbook\n   NAME        TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE\n   guestbook   NodePort   10.10.10.253   <none>        3000:31208/TCP   1m\n   ```\n\n   We can see that our `<nodeport>` is `31208`. 
We can see in the output the port mapping from 3000 inside\n   the pod exposed to the cluster on port 31208. This port in the 31000 range is automatically chosen,\n   and could be different for you.\n\n1. `guestbook` is now running on your cluster, and exposed to the internet. We need to find out where it is accessible.\n   The worker nodes running in the container service get external IP addresses.\n   Get the workers for your cluster and note one (any one) of the public IPs listed on the `<public-IP>` line. Replace `$CLUSTER_NAME` with your cluster name unless you have this environment variable set.\n\n   ```shell\n   $ kubectl get nodes -o wide\n   NAME           STATUS   ROLES           AGE   VERSION           INTERNAL-IP    EXTERNAL-IP      OS-IMAGE   KERNEL-VERSION                CONTAINER-RUNTIME\n   10.185.199.3   Ready    master,worker   63d   v1.16.2+283af84   10.185.199.3   169.59.228.215   Red Hat    3.10.0-1127.13.1.el7.x86_64   cri-o://1.16.6-17.rhaos4.3.git4936f44.el7\n   10.185.199.6   Ready    master,worker   63d   v1.16.2+283af84   10.185.199.6   169.47.78.51     Red Hat    3.10.0-1127.13.1.el7.x86_64   cri-o://1.16.6-17.rhaos4.3.git4936f44.el7\n   ```\n\n   We can see that our `<EXTERNAL-IP>` is `169.59.228.215`.\n\n1. Now that you have both the address and the port, you can now access the application in the web browser\n   at `<public-IP>:<nodeport>`. In the example case this is `169.59.228.215:31208`.\n\nCongratulations, you've now deployed an application to Kubernetes!\n\nWhen you're all done, continue to the\n[next lab of this course](../Lab2/README.md).\n"
  },
  {
    "path": "docs/Lab1/script/script.md",
    "content": "\n# Pod\n\nIn Kubernetes, a group of one or more containers is called a pod. Containers in a pod are deployed together, and are started, stopped, and replicated as a group. The simplest pod definition describes the deployment of a single container. For example, an nginx web server pod might be defined as such:\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  name: mynginx\n  namespace: default\n  labels:\n    run: nginx\nspec:\n  containers:\n  - name: mynginx\n    image: nginx:latest\n    ports:\n    - containerPort: 80\n```\n\n# Labels\n\nIn Kubernetes, labels are a system to organize objects into groups. Labels are key-value pairs that are attached to each object. Label selectors can be passed along with a request to the apiserver to retrieve a list of objects which match that label selector.\n\nTo add a label to a pod, add a labels section under metadata in the pod definition:\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    run: nginx\n...\n```\n\nTo label a running pod\n\n```bash\nkubectl label pod mynginx type=webserver\npod \"mynginx\" labeled\n```\n\nTo list pods based on labels\n\n```bash\nkubectl get pods -l type=webserver\nNAME      READY     STATUS    RESTARTS   AGE\nmynginx   1/1       Running   0          21m\n```\n\n# Deployments\n\nA Deployment provides declarative updates for pods and replicas. You only need to describe the desired state in a Deployment object, and it will change the actual state to the desired state. 
The Deployment object defines the following details:\n\nThe elements of a Replication Controller definition\nThe strategy for transitioning between deployments\nTo create a deployment for a nginx webserver, edit the nginx-deploy.yaml file as\n\n```yaml\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  generation: 1\n  labels:\n    run: nginx\n  name: nginx\n  namespace: default\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      run: nginx\n  strategy:\n    rollingUpdate:\n      maxSurge: 1\n      maxUnavailable: 1\n    type: RollingUpdate\n  template:\n    metadata:\n      labels:\n        run: nginx\n    spec:\n      containers:\n      - image: nginx:latest\n        imagePullPolicy: Always\n        name: nginx\n        ports:\n        - containerPort: 80\n          protocol: TCP\n      dnsPolicy: ClusterFirst\n      restartPolicy: Always\n      securityContext: {}\n      terminationGracePeriodSeconds: 30\n\n```\n\nand create the deployment\n\n```bash\nkubectl create -f nginx-deploy.yaml\ndeployment \"nginx\" created\n```\n\nThe deployment creates the following objects\n\n```bash\nkubectl get all -l run=nginx\n\nNAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE\ndeploy/nginx   3         3         3            3           4m\n\nNAME                 DESIRED   CURRENT   READY     AGE\nrs/nginx-664452237   3         3         3         4m\n\nNAME                       READY     STATUS    RESTARTS   AGE\npo/nginx-664452237-h8dh0   1/1       Running   0          4m\npo/nginx-664452237-ncsh1   1/1       Running   0          4m\npo/nginx-664452237-vts63   1/1       Running   0          4m\n```\n\n# services\n\nServices\n\nKubernetes pods, as containers, are ephemeral. Replication Controllers create and destroy pods dynamically, e.g. when scaling up or down or when doing rolling updates. While each pod gets its own IP address, even those IP addresses cannot be relied upon to be stable over time. 
This leads to a problem: if some set of pods provides functionality to other pods inside the Kubernetes cluster, how do those pods find out and keep track of one another?\n\nA Kubernetes Service is an abstraction which defines a logical set of pods and a policy by which to access them. The set of pods targeted by a Service is usually determined by a label selector. Kubernetes offers a simple Endpoints API that is updated whenever the set of pods in a service changes.\n\nTo create a service for our nginx webserver, edit the nginx-service.yaml file\n\n```yaml\napiVersion: v1\nkind: Service\nmetadata:\n  name: nginx\n  labels:\n    run: nginx\nspec:\n  selector:\n    run: nginx\n  ports:\n  - protocol: TCP\n    port: 8000\n    targetPort: 80\n  type: ClusterIP\n```\n\nCreate the service\n\n`kubectl create -f nginx-service.yaml`\nservice \"nginx\" created\n\n```bash\nkubectl get service -l run=nginx\nNAME      CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE\nnginx     10.254.60.24   <none>        8000/TCP    38s\n```\n\nDescribe the service:\n\n```bash\nkubectl describe service nginx\nName:                   nginx\nNamespace:              default\nLabels:                 run=nginx\nSelector:               run=nginx\nType:                   ClusterIP\nIP:                     10.254.60.24\nPort:                   <unset> 8000/TCP\nEndpoints:              172.30.21.3:80,172.30.4.4:80,172.30.53.4:80\nSession Affinity:       None\nNo events.\n```\n\nThe above service is associated with our previous nginx pods. Pay attention to the service selector run=nginx field. It tells Kubernetes that all pods with the label run=nginx are associated with this service, and should have traffic distributed amongst them. In other words, the service provides an abstraction layer, and it is the input point to reach all of the associated pods.\n"
  },
  {
    "path": "docs/Lab2/README.md",
    "content": "# Lab 2: Scale and Update Deployments\n\nIn this lab, you'll learn how to update the number of instances\na deployment has and how to safely roll out an update of your application\non Kubernetes.\n\nFor this lab, you need a running deployment of the `guestbook` application\nfrom the previous lab. If you need to create it, run:\n\n```shell\nkubectl create deployment guestbook --image=ibmcom/guestbook:v1\n```\n\n## 1. Scale apps with replicas\n\nA *replica* is a copy of a pod that contains a running service. By having\nmultiple replicas of a pod, you can ensure your deployment has the available\nresources to handle increasing load on your application.\n\n1. `kubectl` provides a `scale` subcommand to change the size of an\n   existing deployment. Let's increase our capacity from a single running instance of\n   `guestbook` up to 10 instances:\n\n   ```shell\n   kubectl scale --replicas=10 deployment guestbook\n   ```\n\n   Kubernetes will now try to make reality match the desired state of\n   10 replicas by starting 9 new pods with the same configuration as\n   the first.\n\n1. 
To see your changes being rolled out, you can run:\n\n   ```shell\n   kubectl rollout status deployment guestbook\n   ```\n\n   The rollout might occur so quickly that the following messages might\n   _not_ display:\n\n   ```shell\n   $ kubectl rollout status deployment guestbook\n   Waiting for rollout to finish: 1 of 10 updated replicas are available...\n   Waiting for rollout to finish: 2 of 10 updated replicas are available...\n   Waiting for rollout to finish: 3 of 10 updated replicas are available...\n   Waiting for rollout to finish: 4 of 10 updated replicas are available...\n   Waiting for rollout to finish: 5 of 10 updated replicas are available...\n   Waiting for rollout to finish: 6 of 10 updated replicas are available...\n   Waiting for rollout to finish: 7 of 10 updated replicas are available...\n   Waiting for rollout to finish: 8 of 10 updated replicas are available...\n   Waiting for rollout to finish: 9 of 10 updated replicas are available...\n   deployment \"guestbook\" successfully rolled out\n   ```\n\n1. 
Once the rollout has finished, ensure your pods are running by using:\n\n   ```shell\n   kubectl get pods\n   ```\n\n   You should see output listing 10 replicas of your deployment:\n\n   ```shell\n   $ kubectl get pods\n   NAME                        READY     STATUS    RESTARTS   AGE\n   guestbook-562211614-1tqm7   1/1       Running   0          1d\n   guestbook-562211614-1zqn4   1/1       Running   0          2m\n   guestbook-562211614-5htdz   1/1       Running   0          2m\n   guestbook-562211614-6h04h   1/1       Running   0          2m\n   guestbook-562211614-ds9hb   1/1       Running   0          2m\n   guestbook-562211614-nb5qp   1/1       Running   0          2m\n   guestbook-562211614-vtfp2   1/1       Running   0          2m\n   guestbook-562211614-vz5qw   1/1       Running   0          2m\n   guestbook-562211614-zksw3   1/1       Running   0          2m\n   guestbook-562211614-zsp0j   1/1       Running   0          2m\n   ```\n\n**Tip:** Another way to improve availability is to\n[add clusters and regions](https://cloud.ibm.com/docs/containers?topic=containers-ha_clusters#ha_clusters)\nto your deployment, as shown in the following diagram:\n\n![HA with more clusters and regions](../images/cs_cluster_ha_roadmap_multizone_public.png)\n\n## 2. Update and roll back apps\n\nKubernetes allows you to do rolling upgrade of your application to a new\ncontainer image. This allows you to easily update the running image and also allows you to\neasily undo a rollout if a problem is discovered during or after deployment.\n\nIn the previous lab, we used an image with a `v1` tag. For our upgrade\nwe'll use the image with the `v2` tag.\n\nTo update and roll back:\n\n1. Using `kubectl`, you can now update your deployment to use the\n   `v2` image. `kubectl` allows you to change details about existing\n   resources with the `set` subcommand. 
We can use it to change the\n   image being used.\n\n    ```shell\n    kubectl set image deployment/guestbook guestbook=ibmcom/guestbook:v2\n    ```\n\n   Note that a pod could have multiple containers, each with its own name.\n   Each image can be changed individually or all at once by referring to the name.\n   In the case of our `guestbook` Deployment, the container name is also `guestbook`.\n   Multiple containers can be updated at the same time.\n   ([More information](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#-em-image-em-).)\n\n1. To check the status of the rollout, run:\n\n   ```shell\n   kubectl rollout status deployment/guestbook\n   ```\n\n  The rollout might occur so quickly that the following messages\n   might _not_ display:\n\n   ```shell\n   $ kubectl rollout status deployment/guestbook\n   Waiting for rollout to finish: 2 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 3 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 3 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 3 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 4 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 4 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 4 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 4 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 4 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 5 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 5 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 5 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 6 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 6 out of 10 new replicas have been updated...\n   Waiting for 
rollout to finish: 6 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 7 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 7 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 7 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 7 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 8 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 8 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 8 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 8 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 9 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 9 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 9 out of 10 new replicas have been updated...\n   Waiting for rollout to finish: 1 old replicas are pending termination...\n   Waiting for rollout to finish: 1 old replicas are pending termination...\n   Waiting for rollout to finish: 1 old replicas are pending termination...\n   Waiting for rollout to finish: 9 of 10 updated replicas are available...\n   Waiting for rollout to finish: 9 of 10 updated replicas are available...\n   Waiting for rollout to finish: 9 of 10 updated replicas are available...\n   deployment \"guestbook\" successfully rolled out\n   ```\n\n1. Test the application as before, by accessing `<public-IP>:<nodeport>`\n   in the browser to confirm your new code is active.\n\n   Remember, to get the \"nodeport\" and \"public-ip\" use the following commands. 
Replace `$CLUSTER_NAME` with the name of your cluster if the environment variable is not set.:\n\n   ```shell\n   kubectl describe service guestbook\n   ```\n\n   and\n\n   ```shell\n   kubectl get nodes -o wide\n   ```\n\n   To verify that you're running \"v2\" of guestbook, look at the title of the page,\n   it should now be `Guestbook - v2`. If you are using a browser, make sure you force refresh (invalidating your cache).\n\n1. If you want to undo your latest rollout, use:\n\n   ```shell\n   kubectl rollout undo deployment guestbook\n   ```\n\n   You can then use this command to see the status:\n\n   ```shell\n   kubectl rollout status deployment/guestbook\n   ```\n\n1. When doing a rollout, you see references to *old* replicas and *new* replicas.\n   The *old* replicas are the original 10 pods deployed when we scaled the application.\n   The *new* replicas come from the newly created pods with the different image.\n   All of these pods are owned by the Deployment.\n   The deployment manages these two sets of pods with a resource called a ReplicaSet.\n   We can see the guestbook ReplicaSets with:\n\n   ```shell\n   $ kubectl get replicasets -l app=guestbook\n   NAME                   DESIRED   CURRENT   READY     AGE\n   guestbook-5f5548d4f    10        10        10        21m\n   guestbook-768cc55c78   0         0         0         3h\n   ```\n\nBefore we continue, let's delete the application so we can learn about\na different way to achieve the same results:\n\n To remove the deployment, use\n\n ```shell\n kubectl delete deployment guestbook\n ```\n\n To remove the service, use:\n\n ```shell\n kubectl delete service guestbook\n ```\n\nCongratulations! You deployed the second version of the app. Lab 2\nis now complete. Continue to the [next lab of this course](../Lab3/README.md).\n"
  },
  {
    "path": "docs/Lab3/README.md",
    "content": "# Lab 3: Scale and update apps natively, building multi-tier applications\n\nIn this lab you'll learn how to deploy the same guestbook application we\ndeployed in the previous labs, however, instead of using the `kubectl`\ncommand line helper functions we'll be deploying the application using\nconfiguration files. The configuration file mechanism allows you to have more\nfine-grained control over all of resources being created within the\nKubernetes cluster.\n\nBefore we work with the application we need to clone a github repo:\n\n```shell\ngit clone https://github.com/IBM/guestbook.git\n```\n\nThis repo contains multiple versions of the guestbook application\nas well as the configuration files we'll use to deploy the pieces of the application.\n\nChange directory by running the command\n\n```shell\ncd guestbook/v1\n```\n\nYou will find all the\nconfigurations files for this exercise in this directory.\n\n## 1. Scale apps natively\n\nKubernetes can deploy an individual pod to run an application but when you\nneed to scale it to handle a large number of requests a `Deployment` is the\nresource you want to use.\nA Deployment manages a collection of similar pods. When you ask for a specific number of replicas\nthe Kubernetes Deployment Controller will attempt to maintain that number of replicas at all times.\n\nEvery Kubernetes object we create should provide two nested object fields\nthat govern the object’s configuration: the object `spec` and the object\n`status`. Object `spec` defines the desired state, and object `status`\ncontains Kubernetes system provided information about the actual state of the\nresource. 
As described before, Kubernetes will attempt to reconcile\nyour desired state with the actual state of the system.\n\nFor each object that we create, we need to provide the `apiVersion` you are using\nto create the object, `kind` of the object we are creating and the `metadata`\nabout the object such as a `name`, set of `labels` and optionally `namespace`\nthat this object should belong to.\n\nConsider the following deployment configuration for the guestbook application\n\n> **guestbook-deployment.yaml**\n\n```yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: guestbook-v1\n  labels:\n    app: guestbook\n    version: \"1.0\"\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: guestbook\n  template:\n    metadata:\n      labels:\n        app: guestbook\n        version: \"1.0\"\n    spec:\n      containers:\n      - name: guestbook\n        image: ibmcom/guestbook:v1\n        ports:\n        - name: http-server\n          containerPort: 3000\n```\n\nThe above configuration file creates a deployment object named 'guestbook-v1'\nwith a pod containing a single container running the image\n`ibmcom/guestbook:v1`.  Also the configuration specifies replicas set to 3\nand Kubernetes tries to make sure that at least three active pods are running at\nall times.\n\n- Create guestbook deployment\n\n   To create a Deployment using this configuration file we use the\n   following command:\n\n   ```shell\n   kubectl create -f guestbook-deployment.yaml\n   ```\n\n- List the pod with label app=guestbook\n\n  We can then list the pods it created by listing all pods that\n  have a label of \"app\" with a value of \"guestbook\". This matches\n  the labels defined above in the yaml file in the\n  `spec.template.metadata.labels` section.\n\n   ```shell\n   kubectl get pods -l app=guestbook\n   ```\n\nWhen you change the number of replicas in the configuration, Kubernetes will\ntry to add, or remove, pods from the system to match your request. 
You can\nmake these modifications by using the following command:\n\n   ```shell\n   kubectl edit deployment guestbook-v1\n   ```\n\nThis will retrieve the latest configuration for the Deployment from the\nKubernetes server and then load it into an editor for you. You'll notice\nthat there are a lot more fields in this version than the original yaml\nfile we used. This is because it contains all of the properties about the\nDeployment that Kubernetes knows about, not just the ones we chose to\nspecify when we created it. Also notice that it now contains the `status`\nsection mentioned previously.\n\nTo exit the `vi` editor, type `:q!`, or if you made changes that you want to see reflected, save them using `:wq`.\n\nYou can also edit the deployment file we used to create the Deployment\nto make changes. You should use the following command to make the change\neffective when you edit the deployment locally.\n\n   ```shell\n   kubectl apply -f guestbook-deployment.yaml\n   ```\n\nThis will ask Kubernetes to \"diff\" our yaml file with the current state\nof the Deployment and apply just those changes.\n\nWe can now define a Service object to expose the deployment to external\nclients.\n\n> **guestbook-service.yaml**\n\n```yaml\napiVersion: v1\nkind: Service\nmetadata:\n  name: guestbook\n  labels:\n    app: guestbook\nspec:\n  ports:\n  - port: 3000\n    targetPort: http-server\n  selector:\n    app: guestbook\n  type: LoadBalancer\n```\n\nThe above configuration creates a Service resource named guestbook. A Service\ncan be used to create a network path for incoming traffic to your running\napplication.  
In this case, we are setting up a route from port 3000 on the\ncluster to the \"http-server\" port on our app, which is port 3000 per the\nDeployment container spec.\n\n- Let us now create the guestbook service using the same type of command\n  we used when we created the Deployment:\n\n  ```shell\n  kubectl create -f guestbook-service.yaml\n  ```\n\n- Test guestbook app using a browser of your choice using the url\n  `<your-cluster-ip>:<node-port>`\n\n  Remember, to get the `nodeport` and `public-ip` use the following commands, replacing `$CLUSTER_NAME` with the name of your cluster if the environment variable is not already set.\n\n  ```shell\n  kubectl describe service guestbook\n  ```\n\n  and\n\n  ```shell\n  kubectl get nodes -o wide\n  ```\n\n## 2. Connect to a back-end service\n\nIf you look at the guestbook source code, under the `guestbook/v1/guestbook`\ndirectory, you'll notice that it is written to support a variety of data\nstores. By default it will keep the log of guestbook entries in memory.\nThat's ok for testing purposes, but as you get into a more \"real\" environment\nwhere you scale your application that model will not work because\nbased on which instance of the application the user is routed to they'll see\nvery different results.\n\nTo solve this we need to have all instances of our app share the same data\nstore - in this case we're going to use a redis database that we deploy to our\ncluster. 
This instance of redis will be defined in a similar manner to the guestbook.\n\n> **redis-master-deployment.yaml**\n\n```yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: redis-master\n  labels:\n    app: redis\n    role: master\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: redis\n      role: master\n  template:\n    metadata:\n      labels:\n        app: redis\n        role: master\n    spec:\n      containers:\n      - name: redis-master\n        image: redis:3.2.9\n        ports:\n        - name: redis-server\n          containerPort: 6379\n```\n\nThis yaml creates a redis database in a Deployment named 'redis-master'.\nIt will create a single instance, with replicas set to 1, and the guestbook app instances\nwill connect to it to persist data, as well as read the persisted data back.\nThe image running in the container is 'redis:3.2.9' and exposes the standard redis port 6379.\n\n- Create a redis Deployment, like we did for guestbook:\n\n    ```shell\n    kubectl create -f redis-master-deployment.yaml\n    ```\n\n- Check to see that redis server pod is running:\n\n    ```shell\n    $ kubectl get pods -lapp=redis,role=master\n    NAME                 READY     STATUS    RESTARTS   AGE\n    redis-master-q9zg7   1/1       Running   0          2d\n    ```\n\n- Let us test the redis standalone. Replace the pod name `redis-master-q9zg7` with the name of your pod.\n\n    ```shell\n    kubectl exec -it redis-master-q9zg7 redis-cli\n    ```\n\n    The kubectl exec command will start a secondary process in the specified\n    container. In this case we're asking for the \"redis-cli\" command to be\n    executed in the container named \"redis-master-q9zg7\".  
When this process\n    ends the \"kubectl exec\" command will also exit but the other processes in\n    the container will not be impacted.\n\n    Once in the container we can use the \"redis-cli\" command to make sure the\n    redis database is running properly, or to configure it if needed.\n\n    ```shell\n    redis-cli> ping\n    PONG\n    redis-cli> exit\n    ```\n\nNow we need to expose the `redis-master` Deployment as a Service so that the\nguestbook application can connect to it through DNS lookup.\n\n> **redis-master-service.yaml**\n\n```yaml\napiVersion: v1\nkind: Service\nmetadata:\n  name: redis-master\n  labels:\n    app: redis\n    role: master\nspec:\n  ports:\n  - port: 6379\n    targetPort: redis-server\n  selector:\n    app: redis\n    role: master\n```\n\nThis creates a Service object named 'redis-master' and configures it to target\nport 6379 on the pods selected by the selectors \"app=redis\" and \"role=master\".\n\n- Create the service to access redis master:\n\n    ```shell\n    kubectl create -f redis-master-service.yaml\n    ```\n\n- Restart guestbook so that it will find the redis service to use database:\n\n    ```shell\n    kubectl delete deploy guestbook-v1\n    kubectl create -f guestbook-deployment.yaml\n    ```\n\n- Test guestbook app using a browser of your choice using the url `<your-cluster-ip>:<node-port>`, or by refreshing the page if you already have the app open in another window.\n\nYou can see now that if you open up multiple browsers and refresh the page\nto access the different copies of guestbook that they all have a consistent state.\nAll instances write to the same backing persistent storage, and all instances\nread from that storage to display the guestbook entries that have been stored.\n\nWe have our simple 3-tier application running but we need to scale the\napplication if traffic increases. Our main bottleneck is that we only have\none database server to process each request coming though guestbook. 
One\nsimple solution is to separate the reads and writes such that they go to\ndifferent databases that are replicated properly to achieve data consistency.\n\n![rw_to_master](../images/Master.png)\n\nCreate a deployment named 'redis-slave' that can talk to redis database to\nmanage data reads. In order to scale the database we use the pattern where\nwe can scale the reads using redis slave deployment which can run several\ninstances to read. The redis slave deployment is configured to run two replicas.\n\n![master-slave](../images/Master-Slave.png)\n\n> **redis-slave-deployment.yaml**\n\n```yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: redis-slave\n  labels:\n    app: redis\n    role: slave\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: redis\n      role: slave\n  template:\n    metadata:\n      labels:\n        app: redis\n        role: slave\n    spec:\n      containers:\n      - name: redis-slave\n        image: ibmcom/guestbook-redis-slave:v2\n        ports:\n        - name: redis-server\n          containerPort: 6379\n```\n\n- Create the pods running the redis slave deployment.\n\n  ```shell\n  kubectl create -f redis-slave-deployment.yaml\n  ```\n\n- Check if all the slave replicas are running\n\n  ```shell\n  $ kubectl get pods -lapp=redis,role=slave\n  NAME                READY     STATUS    RESTARTS   AGE\n  redis-slave-kd7vx   1/1       Running   0          2d\n  redis-slave-wwcxw   1/1       Running   0          2d\n  ```\n\n- And then go into one of those pods and look at the database to see\n  that everything looks right. Replace the pod name `redis-slave-kd7vx` with your own pod name. 
If you get back `(empty list or set)` when you print the keys, go to the guestbook application and add an entry!\n\n  ```shell\n  $ kubectl exec -it redis-slave-kd7vx  redis-cli\n  127.0.0.1:6379> keys *\n  1) \"guestbook\"\n  127.0.0.1:6379> lrange guestbook 0 10\n  1) \"hello world\"\n  2) \"welcome to the Kube workshop\"\n  127.0.0.1:6379> exit\n  ```\n\nDeploy the redis slave service so we can access it by DNS name. Once redeployed,\nthe application will send \"read\" operations to the `redis-slave` pods while\n\"write\" operations will go to the `redis-master` pods.\n\n> **redis-slave-service.yaml**\n\n```yaml\napiVersion: v1\nkind: Service\nmetadata:\n  name: redis-slave\n  labels:\n    app: redis\n    role: slave\nspec:\n  ports:\n  - port: 6379\n    targetPort: redis-server\n  selector:\n    app: redis\n    role: slave\n```\n\n- Create the service to access redis slaves.\n\n    ```shell\n    kubectl create -f redis-slave-service.yaml\n    ```\n\n- Restart guestbook so that it will find the slave service to read from.\n\n    ```shell\n    kubectl delete deploy guestbook-v1\n    kubectl create -f guestbook-deployment.yaml\n    ```\n\n- Test guestbook app using a browser of your choice using the url `<your-cluster-ip>:<node-port>`, or by refreshing the page if you have the app open in another window.\n\nThat's the end of the lab. Now let's clean-up our environment:\n\n```shell\nkubectl delete -f guestbook-deployment.yaml\nkubectl delete -f guestbook-service.yaml\nkubectl delete -f redis-slave-service.yaml\nkubectl delete -f redis-slave-deployment.yaml\nkubectl delete -f redis-master-service.yaml\nkubectl delete -f redis-master-deployment.yaml\n```\n"
  },
  {
    "path": "docs/Lab4/README.md",
    "content": "# ***UNDER CONSTRUCTION***\n\n## 1. Check the health of apps\n\nKubernetes uses availability checks (liveness probes) to know when to restart a container. For example, liveness probes could catch a deadlock, where an application is running, but unable to make progress. Restarting a container in such a state can help to make the application more available despite bugs.\n\nAlso, Kubernetes uses readiness checks to know when a container is ready to start accepting traffic. A pod is considered ready when all of its containers are ready. One use of this check is to control which pods are used as backends for services. When a pod is not ready, it is removed from load balancers.\n\nIn this example, we have defined a HTTP liveness probe to check health of the container every five seconds. For the first 10-15 seconds the `/healthz` returns a `200` response and will fail afterward. Kubernetes will automatically restart the service.  \n\n1. Open the `healthcheck.yml` file with a text editor. This configuration script combines a few steps from the previous lesson to create a deployment and a service at the same time. App developers can use these scripts when updates are made or to troubleshoot issues by re-creating the pods:\n\n   1. Update the details for the image in your private registry namespace:\n\n      ```yaml\n      image: \"ibmcom/guestbook:v2\"\n      ```\n\n   2. Note the HTTP liveness probe that checks the health of the container every five seconds.\n\n      ```yaml\n      livenessProbe:\n                  httpGet:\n                    path: /healthz\n                    port: 3000\n                  initialDelaySeconds: 5\n                  periodSeconds: 5\n      ```\n\n   3. In the **Service** section, note the `NodePort`. Rather than generating a random NodePort like you did in the previous lesson, you can specify a port in the 30000 - 32767 range. This example uses 30072.\n\n2. Run the configuration script in the cluster. 
When the deployment and the service are created, the app is available for anyone to see:\n\n   ```bash\n   kubectl apply -f healthcheck.yml\n   ```\n\n   Now that all the deployment work is done, check how everything turned out. You might notice that because more instances are running, things might run a bit slower.\n\n3. Open a browser and check out the app. To form the URL, combine the IP with the NodePort that was specified in the configuration script. To get the public IP address for the worker node:\n\n   ```bash\n   ibmcloud cs workers <cluster-name>\n   ```\n\n   In a browser, you'll see a success message. If you do not see this text, don't worry. This app is designed to go up and down.\n\n   For the first 10 - 15 seconds, a 200 message is returned, so you know that the app is running successfully. After those 15 seconds, a timeout message is displayed, as is designed in the app.\n\n4. Launch your Kubernetes dashboard:\n\n   1. Get your credentials for Kubernetes.\n\n      ```bash\n      kubectl config view -o jsonpath='{.users[0].user.auth-provider.config.id-token}'\n      ```\n\n   2. Copy the **id-token** value that is shown in the output.\n\n   3. Set the proxy with the default port number.\n\n      ```bash\n      kubectl proxy\n      ```\n\n      Output:\n\n      ```bash\n      Starting to serve on 127.0.0.1:8001\n      ```\n\n   4. Sign in to the dashboard.\n\n      1. Open the following URL in a web browser.\n\n         ```bash\n         http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/\n         ```\n\n      2. In the sign-on page, select the **Token** authentication method.\n\n      3. Then, paste the **id-token** value that you previously copied into the **Token** field and click **SIGN IN**.\n\n   In the **Workloads** tab, you can see the resources that you created. From this tab, you can continually refresh and see that the health check is working. 
In the **Pods** section, you can see how many times the pods are restarted when the containers in them are re-created. You might happen to catch errors in the dashboard, indicating that the health check caught a problem. Give it a few minutes and refresh again. You see the number of restarts changes for each pod.\n\n5. Ready to delete what you created before you continue? This time, you can use the same configuration script to delete both of the resources you created.\n\n   ```kubectl delete -f healthcheck.yml```\n\n6. When you are done exploring the Kubernetes dashboard, in your CLI, enter `CTRL+C` to exit the `proxy` command.\n"
  },
  {
    "path": "docs/Lab4/healthcheck.yml",
    "content": "apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  name: hw-demo-deployment\nspec:\n  replicas: 3\n  template:\n    metadata:\n      name: pod-liveness-http\n      labels:\n        run: hw-demo-health\n        test: guestbook-demo\n    spec:\n      containers:\n        - name: hw-demo-container\n          image: \"ibmcom/guestbook:v2\"\n          imagePullPolicy: Always\n          livenessProbe:\n            httpGet:\n              path: /healthz\n              port: 3000\n            initialDelaySeconds: 5\n            periodSeconds: 5\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: hw-demo-service\n  labels:\n    run: hw-demo-health\nspec:\n  type: NodePort\n  selector:\n    run: hw-demo-health\n  ports:\n   - protocol: TCP\n     port: 3000\n     nodePort: 30072\n"
  },
  {
    "path": "docs/LabD/README.md",
    "content": "# Optional Debugging Lab - Tips and Tricks for Debugging Applications in Kubernetes\n\nAdvanced debugging techniques to reach your pods.\n\n## Pod Logs\n\nYou can look at the logs of any of the pods running under your deployments as follows\n\n```shell\nkubectl logs <podname>\n```\n\nRemember that if you have multiple containers running in your pod, you\nhave to specify the specific container you want to see logs from.\n\n```shell\nkubectl logs <pod-name> <container-name>\n```\n\nThis subcommand operates like `tail`. Including the `-f` flag will\ncontinue to stream the logs live once the current time is reached.\n\n## kubectl edit and vi\n\nBy default, on many Linux and macOS systems, you will be dropped into the editor `vi`.\n\n```shell\nexport EDITOR=nano\n```\n\nOn Windows, a copy of `notepad.exe` will be opened with the contents of the file.\n\n## busybox pod\n\nFor debugging live, this command frequently helps me:\n\n```shell\nkubectl create deployment bb --image busybox --restart=Never -it --rm\n```\n\nIn the busybox image is a basic shell that contains useful utilities.\n\nUtils I often use are `nslookup` and `wget`.\n\n`nslookup` is useful for testing DNS resolution in a pod.\n\n`wget` is useful for trying to do network requests.\n\n## Service Endpoints\n\nEndpoint resource can be used to see all the service endpoints.\n\n```shell\nkubectl get endpoints <service>\n```\n\n## ImagePullPolicy\n\nBy default Kubernetes will only pull the image on first use. This can\nbe confusing during development when you expect changes to show up.\n\nYou should be aware of the three `ImagePullPolicy`s:\n\n- IfNotPresent - the default, only request the image if not present.\n- Always - always request the image.\n- Never\n\nMore details on image management may be [found here](https://kubernetes.io/docs/concepts/containers/images/).\n"
  },
  {
    "path": "docs/MAINTAINERS.md",
    "content": "# Maintainers Guide\n\nThis guide is intended for maintainers - anybody with commit access to one or more Developer Technology repositories.\n\n## Maintainers\n\n| Name | GitHub | email |\n|---|---|---|\n| Nathan Fritze  | nfritze | nfritz@us.ibm.com |\n| Nathan LeViere | nathanleviere | njlevier@gmail.com |\n\n## Methodoology\n\nA master branch. This branch MUST be releasable at all times. Commits and merges against this branch MUST contain only bugfixes and/or security fixes. Maintenance releases are tagged against master.\n\nA develop branch. This branch contains your proposed changes\n\nThe remainder of this document details how to merge pull requests to the repositories.\n\n## Merge approval\n\nThe project maintainers use LGTM (Looks Good To Me) in comments on the code review to\nindicate acceptance. A change requires LGTMs from one of the maintainers of each\ncomponent affected.\n\n## Reviewing Pull Requests\n\nWe recommend reviewing pull requests directly within GitHub. This allows a public commentary on changes, providing transparency for all users. When providing feedback be civil, courteous, and kind. Disagreement is fine, so long as the discourse is carried out politely. If we see a record of uncivil or abusive comments, we will revoke your commit privileges and invite you to leave the project.\n\nDuring your review, consider the following points:\n\n## Does the change have impact?\n\nWhile fixing typos is nice as it adds to the overall quality of the project, merging a typo fix at a time can be a waste of effort. (Merging many typo fixes because somebody reviewed the entire component, however, is useful!) Other examples to be wary of:\n\nChanges in variable names. 
Ask whether or not the change will make understanding the code easier, or if it could simply be a personal preference on the part of the author.\n\nEssentially: feel free to close issues that do not have impact.\n\n## Do the changes make sense?\n\nIf you do not understand what the changes are or what they accomplish, ask the author for clarification. Ask the author to add comments and/or clarify test case names to make the intentions clear.\n\nAt times, such clarification will reveal that the author may not be using the code correctly, or is unaware of features that accommodate their needs. If you feel this is the case, work up a code sample that would address the issue for them, and feel free to close the issue once they confirm.\n\n## Is this a new feature? If so\n\nDoes the issue contain narrative indicating the need for the feature? If not, ask them to provide that information. Since the issue will be linked in the changelog, this will often be a user's first introduction to it.\n\nAre new unit tests in place that test all new behaviors introduced? If not, do not merge the feature until they are!\nIs documentation in place for the new feature? (See the documentation guidelines). If not, do not merge the feature until it is!\nIs the feature necessary for general use cases? Try and keep the scope of any given component narrow. If a proposed feature does not fit that scope, recommend to the user that they maintain the feature on their own, and close the request. You may also recommend that they see if the feature gains traction amongst other users, and suggest they re-submit when they can show such support.\n"
  },
  {
    "path": "docs/README.md",
    "content": "# IBM Cloud Kubernetes Service Lab\n\n<img src=\"https://kubernetes.io/images/favicon.png\" width=\"200\">\n\n## An introduction to containers\n\nHey, are you looking for a containers 101 course? Check out our [Docker Essentials](https://developer.ibm.com/courses/all/docker-essentials-extend-your-apps-with-containers/).\n\nContainers allow you to run securely isolated applications with quotas on system resources. Containers started out as an individual feature delivered with the linux kernel. Docker launched with making containers easy to use and developers quickly latched onto that idea. Containers have also sparked an interest in microservice architecture, a design pattern for developing applications in which complex applications are down into smaller, composable pieces which work together.\n\nWatch this [video](https://www.youtube.com/watch?v=wlBhtc31I8c) to learn about production uses of containers.\n\n## Objectives\n\nThis lab is an introduction to using Docker containers on Kubernetes in the IBM Cloud Kubernetes Service. By the end of the course, you'll achieve these objectives:\n\n* Understand core concepts of Kubernetes\n* Build a Docker image and deploy an application on Kubernetes in the IBM Cloud Kubernetes Service\n* Control application deployments, while minimizing your time with infrastructure management\n* Add AI services to extend your app\n* Secure and monitor your cluster and app\n\n## Prerequisites\n\n* A Pay-As-You-Go or Subscription [IBM Cloud account](https://console.bluemix.net/registration/)\n\n## Virtual machines\n\nPrior to containers, most infrastructure ran not on bare metal, but atop hypervisors managing multiple virtualized operating systems (OSes). This arrangement allowed isolation of applications from one another on a higher level than that provided by the OS. These virtualized operating systems see what looks like their own exclusive hardware. 
However, this also means that each of these virtual operating systems are replicating an entire OS, taking up disk space.\n\n## Containers\n\nContainers provide isolation similar to VMs, except provided by the OS and at the process level. Each container is a process or group of processes run in isolation. Typical containers explicitly run only a single process, as they have no need for the standard system services. What they usually need to do can be provided by system calls to the base OS kernel.\n\nThe isolation on linux is provided by a feature called 'namespaces'. Each different kind of isolation (IE user, cgroups) is provided by a different namespace.\n\nThis is a list of some of the namespaces that are commonly used and visible to the user:\n\n* PID - process IDs\n* USER - user and group IDs\n* UTS - hostname and domain name\n* NS - mount points\n* NET - network devices, stacks, and ports\n* CGROUPS - control limits and monitoring of resources\n\n## VM vs container\n\nTraditional applications are run on native hardware. A single application does not typically use the full resources of a single machine. We try to run multiple applications on a single machine to avoid wasting resources. We could run multiple copies of the same application, but to provide isolation we use VMs to run multiple application instances (VMs) on the same hardware. These VMs have full operating system stacks which make them relatively large and inefficient due to duplication both at runtime and on disk.\n\n![Containers versus VMs](images/VMvsContainer.png)\n\nContainers allow you to share the host OS. This reduces duplication while still providing the isolation. Containers also allow you to drop unneeded files such as system libraries and binaries to save space and reduce your attack surface. If SSHD or LIBC are not installed, they cannot be exploited.\n\n## Get set up\n\nBefore we dive into Kubernetes, you need to provision a cluster for your containerized app. 
Then you won't have to wait for it to be ready for the subsequent labs.\n\n1. You must install the CLIs per <https://console.ng.bluemix.net/docs/containers/cs_cli_install.html>. If you do not yet have these CLIs and the Kubernetes CLI, do [lab 0](Lab0) before starting the course.\n2. If you haven't already, provision a cluster. This can take a few minutes, so let it start first: `ibmcloud cs cluster-create --name <name-of-cluster>`\n3. After creation, before using the cluster, make sure it has completed provisioning and is ready for use. Run `ibmcloud cs clusters` and make sure that your cluster is in state \"deployed\".\n4. Then use `ibmcloud cs workers <name-of-cluster>` and make sure that all worker nodes are in state \"normal\" with Status \"Ready\".\n\n## Kubernetes and containers: an overview\n\nLet's talk about Kubernetes orchestration for containers before we build an application on it. We need to understand the following facts about it:\n\n* What is Kubernetes, exactly?\n* How was Kubernetes created?\n* Kubernetes architecture\n* Kubernetes resource model\n* Kubernetes at IBM\n* Let's get started\n\n## What is Kubernetes?\n\nNow that we know what containers are, let's define what Kubernetes is. Kubernetes is a container orchestrator to provision, manage, and scale applications. In other words, Kubernetes allows you to manage the lifecycle of containerized applications within a cluster of nodes (which are a collection of worker machines, for example, VMs, physical machines etc.).\n\nYour applications may need many other resources to run such as Volumes, Networks,  and Secrets that will help you to do things such as connect to databases, talk to firewalled backends, and secure keys. Kubernetes helps you add these resources into your application. Infrastructure resources needed by applications are managed declaratively.\n\n**Fast fact:** Other orchestration technologies are Mesos and Swarm.\n\nThe key paradigm of kubernetes is it’s Declarative model. 
The user provides the \"desired state\" and Kubernetes will do its best to make it happen. If you need 5 instances, you do not start 5 separate instances on your own but rather tell Kubernetes that you need 5 instances and Kubernetes will reconcile the state automatically. Simply at this point you need to know that you declare the state you want and Kubernetes makes that happen. If something goes wrong with one of your instances and it crashes, Kubernetes still knows the desired state and creates a new instance on an available node.\n\n**Fun to know:** Kubernetes goes by many names. Sometimes it is shortened to _k8s_ (losing the internal 8 letters), or _kube_. The word is rooted in ancient Greek and means \"Helmsman\". A helmsman is the person who steers a ship. We hope you can see the analogy between directing a ship and the decisions made to orchestrate containers on a cluster.\n\n## How was Kubernetes created?\n\nGoogle wanted to open source their knowledge of creating and running the internal tools Borg & Omega. It adopted Open Governance for Kubernetes by starting the Cloud Native Computing Foundation (CNCF) and giving Kubernetes to that foundation, therefore making it less influenced by Google directly. Many companies such as RedHat, Microsoft, IBM and Amazon quickly joined the foundation.\n\nMain entry point for the kubernetes project is at [http://kubernetes.io](http://kubernetes.io) and the source code can be found at [https://github.com/kubernetes](https://github.com/kubernetes).\n\n## Kubernetes architecture\n\nAt its core, Kubernetes is a data store (etcd). The declarative model is stored in the data store as objects, that means when you say I want 5 instances of a container then that request is stored into the data store. This information change is watched and delegated to Controllers to take action. Controllers then react to the model and attempt to take action to achieve the desired state. 
The power of Kubernetes is in its simplistic model.\n\nAs shown, API server is a simple HTTP server handling create/read/update/delete(CRUD) operations on the data store. Then the controller picks up the change you wanted and makes that happen. Controllers are responsible for instantiating the actual resource represented by any Kubernetes resource. These actual resources are what your application needs to allow it to run successfully.\n\n![architecture diagram](images/kubernetes_arch.png)\n\n## Kubernetes resource model\n\nKubernetes Infrastructure defines a resource for every purpose. Each resource is monitored and processed by a controller. When you define your application, it contains a collection of these resources. This collection will then be read by Controllers to build your applications actual backing instances. Some of resources that you may work with are listed below for your reference, for a full list you should go to [https://kubernetes.io/docs/concepts/](https://kubernetes.io/docs/concepts/). 
In this class we will only use a few of them, like Pod, Deployment, etc.\n\n* Config Maps holds configuration data for pods to consume.\n* Daemon Sets ensure that each node in the cluster runs this Pod\n* Deployments defines a desired state of a deployment object\n* Events provides lifecycle events on Pods and other deployment objects\n* Endpoints allows inbound connections to reach the cluster services\n* Ingress is a collection of rules that allow inbound connections to reach the cluster services\n* Jobs creates one or more pods and as they complete successfully the job is marked as completed.\n* Node is a worker machine in Kubernetes\n* Namespaces are multiple virtual clusters backed by the same physical cluster\n* Pods are the smallest deployable units of computing that can be created and managed in Kubernetes\n* Persistent Volumes provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed\n* Replica Sets ensures that a specified number of pod replicas are running at any given time\n* Secrets are intended to hold sensitive information, such as passwords, OAuth tokens, and ssh keys\n* Service Accounts provides an identity for processes that run in a Pod\n* Services is an abstraction which defines a logical set of Pods and a policy by which to access them - sometimes called a micro-service.\n* Stateful Sets is the workload API object used to manage stateful applications.\n* and more...\n\n![Relationship of pods, nodes, and containers](images/container-pod-node-master-relationship.jpg)\n\nKubernetes does not have the concept of an application. It has simple building blocks that you are required to compose. Kubernetes is a cloud native platform where the internal resource model is the same as the end user resource model.\n\n## Key resources\n\nA Pod is the smallest object model that you can create and run. You can add labels to a pod to identify a subset to run operations on. 
When you are ready to scale your application you can use the label to tell Kubernetes which Pod you need to scale. A Pod typically represents a process in your cluster. Pods contain at least one container that runs the job and additionally may have other containers in it called sidecars for monitoring, logging, etc. Essentially a Pod is a group of containers.\n\nWhen we talk about an application, we usually refer to a group of Pods. Although an entire application can be run in a single Pod, we usually build multiple Pods that talk to each other to make a useful application. We will see why separating the application logic and backend database into separate Pods will scale better when we build an application shortly.\n\nServices define how to expose your app as a DNS entry to have a stable reference. We use query based selector to choose which pods are supplying that service.\n\nThe user directly manipulates resources via yaml:\n\n```bash\nkubectl (create|get|apply|delete) -f myResource.yaml\n```\n\nKubernetes provides us with a client interface through ‘kubectl’. Kubectl commands allow you to manage your applications, manage cluster and cluster resources, by modifying the model in the data store.\n\n## Kubernetes application deployment workflow\n\n![deployment workflow](images/app_deploy_workflow.png)\n\n1. User via \"kubectl\" deploys a new application. Kubectl sends the request to the API Server.\n2. API server receives the request and stores it in the data store (etcd). Once the request is written to data store, the API server is done with the request.\n3. Watchers detect the resource changes and send a notification to the controller to act upon it\n4. Controller detects the new app and creates new pods to match the desired number of instances. Any changes to the stored model will be picked up to create or delete Pods.\n5. Scheduler assigns new pods to a Node based on criteria. Scheduler makes decisions to run Pods on specific Nodes in the cluster. 
Scheduler modifies the model with the node information.\n6. Kubelet on a node detects a pod with an assignment to itself, and deploys the requested containers via the container runtime (e.g. Docker). Each Node watches the storage to see what pods it is assigned to run. It takes necessary actions on resource assigned to it like create/delete Pods.\n7. Kubeproxy manages network traffic for the pods - including service discovery and load-balancing. Kubeproxy is responsible for communication between Pods that want to interact.\n\n## Lab information\n\nIBM Cloud provides the capability to run applications in containers on Kubernetes. The IBM Cloud Kubernetes Service runs Kubernetes clusters which deliver the following:\n\n* Powerful tools\n* Intuitive user experience\n* Built-in security and isolation to enable rapid delivery of secure applications\n* Cloud services including cognitive capabilities from Watson\n* Capability to manage dedicated cluster resources for both stateless applications and stateful workloads\n\n## Lab overview\n\n[Lab 0](Lab0) (Optional): Provides a walkthrough for installing IBM Cloud command-line tools and the Kubernetes CLI. You can skip this lab if you have the IBM Cloud CLI, the container-service plugin, the containers-registry plugin, and the kubectl CLI already installed on your machine.\n\n[Lab 1](Lab1): This lab walks through creating and deploying a simple \"guestbook\" app written in Go as a net/http Server and accessing it.\n\n[Lab 2](Lab2): Builds on lab 1 to expand to a more resilient setup which can survive having containers fail and recover. Lab 2 will also walk through basic services you need to get started with Kubernetes and the IBM Cloud Kubernetes Service\n\n[Lab 3](Lab3): Builds on lab 2 by increasing the capabilities of the deployed Guestbook application. 
This lab covers basic distributed application design and how kubernetes helps you use standard design practices.\n\n[Lab 4](Lab4): How to enable your application so Kubernetes can automatically monitor and recover your applications with no user intervention.\n\n[Lab D](LabD): Debugging tips and tricks to help you along your Kubernetes journey. This lab is useful reference that does not follow in a specific sequence of the other labs.\n"
  },
  {
    "path": "docs/SUMMARY.md",
    "content": "# Summary\n\n<!-- Rules of SUMMARY.md are here: https://docs.gitbook.com/integrations/github/content-configuration#summary -->\n<!-- All headings MUST be THREE hashmarks (###) -->\n<!-- Indented bullets (4 spaces) will make the first line be a section -->\n\n### Getting Started\n\n* [Lab 0: Get the IBM Cloud Container Service](Lab0/README.md)\n\n### Labs\n\n* [Lab 1. Set up and deploy your first application](Lab1/README.md)\n* [Lab 2: Scale and Update Deployments](Lab2/README.md)\n* [Lab 3: Scale and update apps natively, building multi-tier applications](Lab3/README.md)\n\n### Resources\n\n* [IBM Developer](https://developer.ibm.com)\n"
  },
  {
    "path": "docs/bx_login.sh",
    "content": "#!/bin/sh\n\nif [ -z $CF_ORG ]; then\n  CF_ORG=\"$BLUEMIX_ORG\"\nfi\nif [ -z $CF_SPACE ]; then\n  CF_SPACE=\"$BLUEMIX_SPACE\"\nfi\n\n\nif [ -z \"$BLUEMIX_API_KEY\" ] || [ -z \"$BLUEMIX_NAMESPACE\" ]; then\n  echo \"Define all required environment variables and rerun the stage.\"\n  exit 1\nfi\necho \"Deploy pods\"\n\necho \"bx login -a $CF_TARGET_URL\"\nbx login -a \"$CF_TARGET_URL\" -o \"$CF_ORG\" -s \"$CF_SPACE\" --apikey \"$BLUEMIX_API_KEY\"\nif [ $? -ne 0 ]; then\n  echo \"Failed to authenticate to IBM Cloud\"\n  exit 1\nfi\n\n# Init container clusters\necho \"bx cs init\"\nbx cs init\nif [ $? -ne 0 ]; then\n  echo \"Failed to initialize to IBM Cloud Kubernetes Service\"\n  exit 1\nfi\n\n# Init container registry\necho \"bx cr login\"\nbx cr login\nif [ $? -ne 0 ]; then\n  echo \"Failed to login to the IBM Cloud Container Registry\"\n  exit 1\nfi\n"
  },
  {
    "path": "docs/deploy.sh",
    "content": "#!/bin/bash\n\necho \"Create Demo Application\"\n\nIP_ADDR=$(bx cs workers $CLUSTER_NAME | grep normal | awk '{ print $2 }')\nif [ -z $IP_ADDR ]; then\n  echo \"$CLUSTER_NAME not created or workers not ready\"\n  exit 1\nfi\n\necho -e \"Configuring vars\"\nexp=$(bx cs cluster-config $CLUSTER_NAME | grep export)\nif [ $? -ne 0 ]; then\n  echo \"Cluster $CLUSTER_NAME not created or not ready.\"\n  exit 1\nfi\neval \"$exp\"\n\necho -e \"Setting up Stage 3 Watson Deployment yml\"\ncd Stage3/\n# curl --silent \"https://raw.githubusercontent.com/IBM/container-service-getting-started-wt/master/Stage3/watson-deployment.yml\" > watson-deployment.yml\n#\n## WILL NEED FOR LOADBALANCER ###\n# #Find the line that has the comment about the load balancer and add the nodeport def after this\n# let NU=$(awk '/^  # type: LoadBalancer/{ print NR; exit }' guestbook.yml)+3\n# NU=$NU\\i\n# sed -i \"$NU\\ \\ type: NodePort\" guestbook.yml #For OSX: brew install gnu-sed; replace sed references with gsed\n\necho -e \"Deleting previous version of Watson Deployment if it exists\"\nkubectl delete --ignore-not-found=true -f watson-deployment.yml\n\necho -e \"Unbinding previous version of Watson Tone Analyzer if it exists\"\nbx service list | grep tone\nif [ $? -eq 0 ]; then\n  bx cs cluster-service-unbind $CLUSTER_NAME default tone\nfi\n\necho -e \"Deleting previous Watson Tone Analyzer instance if it exists\"\nbx service delete tone -f\n\necho -e \"Creating new instance of Watson Tone Analyzer named tone...\"\nbx service create tone_analyzer standard tone\n\necho -e \"Binding Watson Tone Service to Cluster and Pod\"\nbx cs cluster-service-bind $CLUSTER_NAME default tone\n\necho -e \"Building Watson and Watson-talk images...\"\ncd watson/\ndocker build -t registry.ng.bluemix.net/contbot/watson . &> buildout.txt\nif [ $? 
-ne 0 ]; then\n  echo \"Could not create the watson image for the build\"\n  cat buildout.txt\n  exit 1\nfi\ndocker push registry.ng.bluemix.net/contbot/watson\nif [ $? -ne 0 ]; then\n  echo \"Could not push the watson image for the build\"\n  exit 1\nfi\ncd ..\ncd watson-talk/\ndocker build -t registry.ng.bluemix.net/contbot/watson-talk . &> buildout.txt\nif [ $? -ne 0 ]; then\n  echo \"Could not create the watson-talk image for the build\"\n  cat buildout.txt\n  exit 1\nfi\ndocker push registry.ng.bluemix.net/contbot/watson-talk\nif [ $? -ne 0 ] ; then\n  echo \"Could not push the watson image for the build\"\n  exit 1\nfi\n\necho -e \"Injecting image namespace into deployment yamls\"\ncd ..\nsed -i \"s/<namespace>/${BLUEMIX_NAMESPACE}/\" watson-deployment.yml\nif [ $? -ne 0 ] ; then\n  echo \"Could not inject image namespace into deployment yaml\"\n  exit 1\nfi\n\necho -e \"Creating pods\"\nkubectl create -f watson-deployment.yml\n\nPORT=$(kubectl get services | grep watson-service | sed 's/.*:\\([0-9]*\\).*/\\1/g')\n\necho \"\"\necho \"View the watson talk service at http://$IP_ADDR:$PORT\"\n"
  },
  {
    "path": "docs/deploy_rollup.sh",
    "content": "#!/bin/bash\necho \"Install IBM Cloud CLI\"\n. workshop/install_bx.sh\nif [ $? -ne 0 ]; then\n  echo \"Failed to install IBM Cloud Kubernetes Service CLI prerequisites\"\n  exit 1\nfi\n\necho \"Login to IBM Cloud\"\n. workshop/bx_login.sh\nif [ $? -ne 0 ]; then\n  echo \"Failed to authenticate to IBM Cloud Kubernetes Service\"\n  exit 1\nfi\n\necho \"Testing yml files for generalized namespace\"\n. workshop/test_yml.sh\nif [ $? -ne 0 ]; then\n  echo \"Failed to find <namespace> in deployment YAML files\"\n  exit 1\nfi\n\necho \"Deploy pods for Stage 3...\"\n. workshop/deploy.sh\nif [ $? -ne 0 ]; then\n  echo \"Failed to Deploy pods for stage 3 to IBM Cloud Kubernetes Service\"\n  exit 1\nfi\n"
  },
  {
    "path": "docs/install_bx.sh",
    "content": "#!/bin/bash\n\necho \"Download IBM Cloud CLI\"\nwget --quiet --output-document=/tmp/Bluemix_CLI_amd64.tar.gz  http://public.dhe.ibm.com/cloud/bluemix/cli/bluemix-cli/latest/Bluemix_CLI_amd64.tar.gz\ntar -xf /tmp/Bluemix_CLI_amd64.tar.gz --directory=/tmp\n\n# Create bx alias\necho \"#!/bin/sh\" >/tmp/Bluemix_CLI/bin/bx\necho \"/tmp/Bluemix_CLI/bin/bluemix \\\"\\$@\\\" \" >>/tmp/Bluemix_CLI/bin/bx\nchmod +x /tmp/Bluemix_CLI/bin/*\nchmod +x /tmp/Bluemix_CLI/bin/cfcli/*\n\nexport PATH=\"/tmp/Bluemix_CLI/bin:$PATH\"\n\n# Install IBM Cloud CS plugin\necho \"Install the IBM Cloud Kubernetes Service plugin\"\nbx plugin install container-service -r Bluemix\nbx plugin install container-registry -r Bluemix\n\necho \"Install kubectl\"\nwget --quiet --output-document=/tmp/Bluemix_CLI/bin/kubectl  https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl\nchmod +x /tmp/Bluemix_CLI/bin/kubectl\n\nif [ -n \"$DEBUG\" ]; then\n  bx --version\n  bx plugin list\nfi\n"
  },
  {
    "path": "docs/test_yml.sh",
    "content": "#!/bin/bash\necho \"Testing YAML files for <namespace>\"\nls */*.yml\nimageLines=`grep image: */*.yml`\nnamespaceLines=`grep \\<namespace\\> */*.yml`\nif [ \"$imageLines\" = \"$namespaceLines\" ]; then\n    echo \"<namespace> found as expected in YAML files\"\nelse\n    echo \"<namespace> NOT FOUND as expected in YAML files\"\n    exit 1\nfi\n\n"
  },
  {
    "path": "mkdocs.yml",
    "content": "# Project information\nsite_name: Kube 101 Workshop\nsite_url: https://ibm.github.io/kube101\nsite_author: IBM Developer\n\n# Repository\nrepo_name: kube101\nrepo_url: https://github.com/ibm/kube101\nedit_uri: edit/master/docs\n\n# Navigation\nnav:\n  - Welcome:\n    - About the workshop: README.md\n  - Workshop:\n    - Lab 0. Access a Kubernetes cluster: Lab0/README.md\n    - Lab 1. Deploy your first application: Lab1/README.md\n    - Lab 2. Scale and update deployments: Lab2/README.md\n    - Lab 3. Build multi-tier applications: Lab3/README.md\n\n## DO NOT CHANGE BELOW THIS LINE\n\n# Copyright\ncopyright: Copyright &copy; 2020 IBM Developer\n\n# Theme\ntheme:\n  name: material\n  font:\n    text: IBM Plex Sans\n    code: IBM Plex Mono\n  icon:\n    logo: material/library\n  features:\n    # - navigation.tabs\n    - navigation.instant\n    - navigation.expand\n  palette:\n    scheme: default\n    primary: blue\n    accent: blue\n\n# Plugins\nplugins:\n  - search\n\n# Customization\nextra:\n  social:\n    - icon: fontawesome/brands/github\n      link: https://github.com/ibm\n    - icon: fontawesome/brands/twitter\n      link: https://twitter.com/ibmdeveloper\n    - icon: fontawesome/brands/linkedin\n      link: https://www.linkedin.com/company/ibm/\n    - icon: fontawesome/brands/youtube\n      link: https://www.youtube.com/user/developerworks\n    - icon: fontawesome/brands/dev\n      link: https://dev.to/ibmdeveloper\n\n# Extensions\nmarkdown_extensions:\n  - abbr\n  - admonition\n  - attr_list\n  - def_list\n  - footnotes\n  - meta\n  - toc:\n      permalink: true\n  - pymdownx.arithmatex:\n      generic: true\n  - pymdownx.betterem:\n      smart_enable: all\n  - pymdownx.caret\n  - pymdownx.critic\n  - pymdownx.details\n  - pymdownx.emoji:\n      emoji_index: !!python/name:materialx.emoji.twemoji\n      emoji_generator: !!python/name:materialx.emoji.to_svg\n  - pymdownx.highlight\n  - pymdownx.inlinehilite\n  - pymdownx.keys\n  - pymdownx.mark\n 
 - pymdownx.smartsymbols\n  - pymdownx.snippets:\n      check_paths: true\n  - pymdownx.superfences:\n      custom_fences:\n        - name: mermaid\n          class: mermaid\n          format: !!python/name:pymdownx.superfences.fence_code_format\n  - pymdownx.tabbed\n  - pymdownx.tasklist:\n      custom_checkbox: true\n  - pymdownx.tilde\n"
  },
  {
    "path": "presentation/.keep",
    "content": ""
  },
  {
    "path": "presentation/scripts/README.md",
    "content": "# Instructions for Running the Presentation Demo Scripts\n\n## The following are needed:\n- `bx` executable\n- `bx cs` plugin\n- `docker` installed\n- `kubectl`\n\n## Prereq steps:\n- Pick a name for your cluster (e.g. `osscluster`)\n- Pick a name for your Registry namespace (e.g. `kube101`)\n- Log into IBM Cloud: `bx login` or if you use sso: `bx login --sso`\n- Log into the IBM Registry: `bx cr login`\n- Create a Kubernetes cluster: `bx cs cluster-create --name osscluster`, if\n  it doesn't already exist\n- Create the registry namespace: `bx cr namespace-add kube101`, if it doesn't\n  already exist\n\n## Running the demo scripts\n\n- Modify the `common.sh` file to make sure your cluster and registry\n  namespace values are set properly\n- Run each `sh` file as instructed in the [presentation](../Workshop.pptx)\n- As the scripts are running press `ENTER` or `space` when it pauses\n- If you press `f` it will remove the delay as it types\n- If you press `r` it will remove the pauses. So, if you want to do both\n  press `f` before you press `r`\n\n## Cleaning up\n\nRun `clean.sh` to clean up the environment. It does not erase your\ncluster or registry namespace.\n\n## Automated running of scripts\n\nTo run all of the script in an automated way, make sure your cluster and\nregistry namespace are ready and then:\n```\nSKIP=1 DELAY=0 ./all.sh\n```\n\n`SKIP=1` tells it to not pause at each command\n\n`DELAY=0` tells it to not print slowly, simulating typing\n"
  },
  {
    "path": "presentation/scripts/all.sh",
    "content": "#!/bin/bash\nset -e\n\n./lab1-1.sh\n./lab1-2.sh\n./lab1-3.sh\n./lab2-1.sh\n./lab2-2.sh\n\n./clean.sh\n"
  },
  {
    "path": "presentation/scripts/clean.sh",
    "content": "#!/bin/bash\n\nsource ./demoscript\n\ncomment \"Cleaning up...\"\n\nkubectl delete deploy/guestbook || true\nkubectl delete svc/guestbook || true\n"
  },
  {
    "path": "presentation/scripts/common.sh",
    "content": "#!/bin/bash\n\nsource ./demoscript\n\n# These need to be changed by the person running the demo, or set them\n# in your environment prior to running the scripts\nNAMESPACE=${NAMESPACE:-kube101}\nCLUSTER_NAME=${CLUSTER_NAME:-osscluster}\n\n# Should not need to touch these\nDEPLOYMENT_NAME=guestbook\nIMAGE_NAME=ibmcom/guestbook\n\n"
  },
  {
    "path": "presentation/scripts/demoscript",
    "content": "# SAVE=1      Save output to a tar file for off-line running\n# SKIP=1      Do not wait for user to press a key to continue\n# RECOVER=1   Use the canned output when a command fails\n# USESAVED=1  Use canned output instead of running the commands\n# USESAVED=2  Use canned output IFF it exists, otherwise run it\n\nscriptName=\"${0##.*/}\"\nbold=$(tput bold)\nnormal=$(tput sgr0)\ndelay=${DELAY:-\"0.02\"}\nskip=\"$SKIP\"\nsave=\"$SAVE\"\nsaveTar=$(cd $(dirname \"$0\");pwd)/${scriptName}.tar\nrecover=\"$RECOVER\"\nuseSaved=\"$USESAVED\"\n\ntrap clean EXIT\n\nfunction clean {\n\trm -f out\n}\n\nif [[ \"${useSaved}\" != \"\" && \"${useSaved}\" != \"2\" && ! -e \"${saveTar}\" ]]; then\n\techo \"Missing saved output file: ${saveTar}\"\n\texit 1\nfi\n\nif [[ \"${save}\" != \"\" && \"${useSaved}\" == \"\" ]]; then\n\trm -f \"${saveTar}\"\nfi\n\nfunction myscript() {\n    if [[ \"$(uname)\" == Darwin ]]; then\n    \tscript -q -a /dev/null $*\n    else\n    \tscript -efq -a /dev/null -c \"$*\"\n    fi\n}\n\nfunction slowType() {\n\tstr=\"$*\"\n\tif [[ \"$delay\" == \"0\" ]]; then\n\t\techo -n $bold$str$normal\n\t\treturn\n\tfi\n\tfor i in `seq 0 ${#str}`; do\n\t\techo -n $bold${str:$i:1}$normal\n\t\tsleep $delay\n\tdone\n}\n\nfunction slowTty() {\n\tstr=\"$*\"\n\techo -n $bold >&3\n\tif [[ \"$delay\" == \"0\" ]]; then\n\t\techo -n \"$str\"\n\telse\n\t    for i in `seq 0 ${#str}`; do\n\t\t    echo -n \"${str:$i:1}\"\n\t\t    sleep $delay\n\t    done\n\tfi\n\tsleep 0.2  # just to give the other program time to show its input\n\techo -n $normal >&3\n}\n\nfunction readChar() {\n\tread -s -n 1 ch\n\tcase \"$ch\" in\n\t\tf ) delay=\"0\" ;;\n\t\ts ) delay=${DELAY:-\"0.02\"} ;;\n\t\tr ) skip=\"x\" ;;\n\tesac\n}\n\nfunction pause() {\n\tif [[ \"$skip\" == \"\" ]]; then\n\t\treadChar\n\telse\n\t\tsleep 0.2\n\tfi\n}\n\ncmdNum=0\n\nfunction doit() {\n\tlocal ignorerc=\"\"\n\tlocal shouldfail=\"\"\n\tlocal noexec=\"\"\n\tlocal fakeit=${useSaved:-}\n\tlocal 
noscroll=${NOSCROLL:-}\n\tlocal postcmd=\"\"\n\n\twhile [[ \"$1\" == \"--\"* ]]; do\n\t\topt=\"$1\"\n\t\tshift\n\n\t\tcase \"$opt\" in\n\t\t\t--ignorerc   ) ignorerc=\"1\"   ;;\n\t\t\t--shouldfail ) shouldfail=\"1\" ;;\n\t\t\t--noexec     ) noexec=\"1\"     ;;\n\t\t\t--usesaved   ) fakeit=\"1\"     ;;\n\t\t\t--noscroll   ) noscroll=\"1\"   ;;\n\t\t\t--scroll     ) noscroll=\"\"    ;;\n\t\t\t--post*      ) postcmd=\"${opt#*=}\" ;;\n\t\tesac\n\tdone\n\n\tset +e\n\techo -n $bold\"$\"$normal\" \"\n\tpause\n\tslowType $*\n\techo \"$*\" >> cmds\n\tpause\n\techo\n\n\tsaveFile=\"run.${cmdNum}\"\n\tlocal lines=$(tput lines)\n\tlet lines=lines-3\n\tmoreCMD=\"more -$lines\"\n\tif [[ \"$skip\" != \"\" || \"$noscroll\" != \"\" ]]; then\n\t\tmoreCMD=\"cat\"\n\tfi\n\tif [[ \"$postcmd\" != \"\" ]]; then\n\t\tmoreCMD=\"$postcmd | $moreCMD\"\n\tfi\n\n\t# Unless we're told to not execute it, do it\n\tif [[ \"$noexec\" == \"\" ]]; then\n\t\tif [[ \"$fakeit\" != \"\" ]]; then\n\t\t\t# Faking it!\n\t\t\tif tar -xf \"${saveTar}\" \"${saveFile}\" > /dev/null 2>&1; then\n\t\t\t    # echo \"** Using saved output ${saveFile} **\"\n\t\t\t\tcp \"${saveFile}\" out\n\t\t\t\trm \"${saveFile}\"\n\t\t\t\tcat out | eval ${moreCMD[@]}\n\t\t\t\n\t\t\t\tif [[ \"$shouldfail\" == \"\" ]]; then\n\t\t\t\t\trc=0\n\t\t\t\telse\n\t\t\t\t\trc=1\n\t\t\t\tfi\n\t\t\telse\n\t\t\t\tif [[ \"$fakeit\" == \"2\" ]]; then\n\t\t\t\t\t# file doesn't exist so just try to run it instead\n\t\t\t\t\tfakeit=\"\"\n\t\t\t\telse\n\t\t\t\t\techo -n > out\n\t\t\t\tfi\n\t\t\tfi\n\t\tfi\n\n\t\tif [[ \"$fakeit\" == \"\" ]]; then\n\t\t\t# Run the cmd\n\t\t\tbash -c \" $* \" 2>&1 | tee out | eval ${moreCMD[@]}\n\t\t\trc=${PIPESTATUS[0]}\n\n\t\t\t# Save the output if we're asked to\n\t\t\tif [[ \"$save\" != \"\" ]]; then\n\t\t\t\tcp out \"${saveFile}\"\n\t\t\t\t# tar --delete -f \"${saveTar}\" \"${saveFile}\" > /dev/null 2>&1 || true\n\t\t\t\ttar -rf \"${saveTar}\" \"${saveFile}\"\n\t\t\t\trm \"${saveFile}\"\n\t\t\tfi\n\t\tfi\n\n\t\t# If 
the cmd failed see if we should use the canned output\n\t\tif [[ \"$recover\" != \"\" ]]; then\n\t\t\tif [[ ( ( \"$shouldfail\" == \"\" && \"$rc\" != \"0\" ) || \\\n\t\t        \t( \"$shouldfail\" != \"\" && \"$rc\" == \"0\" ) ) ]] && \\\n\t\t\t\t  tar -xf \"${saveTar}\" \"${saveFile}\" > /dev/null 2>&1 ; then\n\t\t\t\t# echo \"** Using saved output ${saveFile} **\"\n\t\t\t\tcp \"${saveFile}\" out\n\t\t\t\trm \"${saveFile}\"\n\t\t\tfi\n\t\tfi\n\t\tlet cmdNum=cmdNum+1\n\telse\n\t\t# We're not really executing it, just showing the cmd\n\t\techo -n > out\n\t\trc=0\n\tfi\n\n\techo\n\n\tif [[ \"$ignorerc\" == \"\" ]]; then\n\t\t# We're not totally ignoring the exit code\n\t\tif [[ \"$shouldfail\" != \"\" ]]; then\n\t\t\t# We need to make sure the command failed as expected\n\t\t\tif [[ \"$rc\" == \"0\" ]]; then\n\t\t\t\techo \"Expected non-zero exit code, got: $rc\"\n\t\t\t\texit 1\n\t\t\tfi\n\t\telse\n\t\t\t# Normal non-zero exit code expected case\n\t\t\tif [[ \"$rc\" != \"0\" ]]; then\n\t\t\t\techo \"Non-zero exit code: $rc\"\n\t\t\t\texit 1\n\t\t\tfi\n\t\tfi\n\tfi\n\n\tset -e\n}\n\nfunction background() {\n\techo -n $bold\"$\"$normal\" \"\n\tslowType $*\n\techo \"$*\" >> cmds\n\techo\n\tbash -c \" $* \" &\n}\n\nfunction ttyDoit() {\n\tlocal ignorerc=\"\"\n\tlocal shouldfail=\"\"\n\n\twhile [[ \"$1\" == \"--\"* ]]; do\n\t\topt=\"$1\"\n\t\tshift\n\n\t\tcase \"$opt\" in\n\t\t\t--ignorerc   ) ignorerc=\"1\"   ;;\n\t\t\t--shouldfail ) shouldfail=\"1\" ;;\n\t\tesac\n\tdone\n\n    echo -n $bold\"$\"$normal\" \"\n\tpause\n    slowType \"$*\"\n\tpause\n    echo\n\n    exec 3>&1\n\tset +e\n    (\n\t\tsleep 0.2\n        while read -u 10 line ; do\n\t\t\tdontWait=\"\"\n\t\t\tif [[ \"$line\" == \"run \"* ]]; then\n\t\t\t\tline=${line:4}\n\t\t\t\t${line}\n\t\t\t\tcontinue\n\t\t\tfi\n\t\t\tif [[ \"$line\" == \"@\"* ]]; then\n\t\t\t\t# Lines starting with \"@\" will be executed\n\t\t\t\t# immediately w/o pausing before or after showing 
it\n\t\t\t\tdontWait=\"x\"\n\t\t\t\tline=${line:1}\n\t\t\tfi\n\t\t\tif [[ \"$dontWait\" == \"\" ]]; then pause ; fi\n            slowTty $line\n\t\t\tif [[ \"$dontWait\" == \"\" ]]; then pause ; fi\n            echo\n\t\t\tsleep 0.2\n        done\n        echo\n    ) | myscript $*\n\trc=${PIPESTATUS[1]}\n\techo -n $normal\n\techo\n\t[[ \"$ignorerc\" == \"\" && \"$rc\" != \"0\" ]] && echo \"Non-zero exit code\" && exit 1\n\t[[ \"$shouldfail\" != \"\" && \"$rc\" == \"0\" ]] && echo \"Expected non-zero exit code\" && exit 1\n\tset -e\n}\n\nfunction comment() {\n\tlocal LF=\"\\\\n\"\n\tlocal CR=${LF}\n\tlocal echoopt=\"\"\n\tlocal dopause=\"\"\n\tlocal dopauseafter=\"\"\n\tlocal nohash=\"\"\n\n\twhile [[ \"$1\" == \"--\"* ]]; do\n\t\topt=\"$1\"\n\t\tshift\n\n\t\tcase \"$opt\" in\n\t\t\t--nolf  ) LF=\"\" ;;\n\t\t\t--nocr  ) CR=\"\" ; LF=\"\" ;;\n\t\t\t--pause ) dopause=\"1\" ; dopauseafter=\"1\" ;;\n\t\t\t--pauseafter ) dopauseafter=\"1\" ;;\n\t\t\t--nohash ) nohash=\"1\" ;;\n\t\tesac\n\tdone\n\tif [[ \"$nohash\" == \"\" ]]; then\n\t    echo -en $bold\\#\" \"$normal\n    fi\n\tif [[ \"$dopause\" == \"1\" ]]; then\n\t\tpause\n\tfi\n\techo -en ${echoopt} \"$bold$*$normal\"\n\tif [[ \"$dopause\" == \"1\" || \"$dopauseafter\" == \"1\" ]]; then\n\t\tpause\n\tfi\n\techo -en ${echoopt} \"${CR}${LF}\"\n}\n\n# Wait until the passed in cmd returns true\nfunction wait() {\n\t# set -x\n\tif [[ \"${useSaved}\" != \"\" ]]; then\n\t\treturn\n\tfi\n\tif [ \"$1\" == \"!\" ]; then\n\t\tshift\n\t    while (bash -c \" $* \" &> /dev/null); do\n\t        sleep 1\n\t    done\n\telse\n\t    while !(bash -c \" $* \" &> /dev/null); do\n\t        sleep 1\n\t    done\n\tfi\n\t# set +x\n}\n\nfunction scroll() {\n\tlocal lines=$(tput lines)\n\tlet lines=lines-3\n\n\techo -n $bold\"$\"$normal\" \"\n\t# set +e\n\tpause\n\tif [[ \"$skip\" == \"\" ]]; then\n\t  slowType more $*\n\telse\n\t  slowType cat $*\n\tfi\n\tpause\n\techo\n\tif [[ \"$skip\" == \"\" ]]; then\n\t  more -$lines $*\n\telse\n\t  cat 
$*\n\tfi\n\techo\n}\n"
  },
  {
    "path": "presentation/scripts/lab1-1.sh",
    "content": "#!/bin/bash\n\nsource ./common.sh\n\ncomment \"Pull the first version of our docker image\"\nDIR=$(pwd)\ncd ../../workshop/Lab1\ndoit docker pull ${IMAGE_NAME}:v1 \ncd $DIR\n\n# We're going to test the image locally with Docker before we run it on Kube.\n# This is to allow us ot see what the output looks like in advance.\n# Notice we're mapping port 8080 in the container to 32768 on the host.\ncomment \"First run/test locally\"\ndoit docker run -itd -p 32768:3000 ${IMAGE_NAME}:v1\nCID=$(cat out)  # Save the container ID\n\n# Now that our image is running, we will use curl to access the content\ncomment \"Test it\"\ndoit curl -s localhost:32768/hello\n\ncomment \"Clean up container\"\ndoit docker rm -f ${CID}\n\ncomment --pauseafter \"*** End of \"$(basename $0)\n"
  },
  {
    "path": "presentation/scripts/lab1-2.sh",
    "content": "#!/bin/bash\n\nsource ./common.sh\n\ncomment \"Deploy our app on kubernetes, using the image in our registry\"\ndoit kubectl run ${DEPLOYMENT_NAME} --image=${IMAGE_NAME}:v1 \n\ncomment \"The result of our run command is a deployment.\"\ndoit kubectl get deployment ${DEPLOYMENT_NAME}\n\ncomment --nolf \"Notice the desired & current states.\"\ncomment --nolf \"Kubernetes is reconciling to achieve our objective\"\ncomment  \"The actual unit of work is running in a pod\"\ndoit kubectl get pods -l run=guestbook\n\ncomment \"We can see that it is ready and running\"\n\ncomment --pauseafter \"*** End of \"$(basename $0)\n"
  },
  {
    "path": "presentation/scripts/lab1-3.sh",
    "content": "#!/bin/bash\n\nsource ./common.sh\n\ncomment --nolf \"Our app is running in our kubernetes cluster, but it is not reachable\"\ncomment \"We need to EXPOSE it before it is useful.\"\n\ndoit kubectl expose deployment ${DEPLOYMENT_NAME} --type=\"NodePort\" --port=3000\n\ncomment --nolf \"Now that it is exposed, curl it like we did before with docker\"\ncomment \"But first, get address of worker node\"\ndoit bx cs workers ${CLUSTER_NAME} --json\n\nWORKER_IP=$(cat out | grep publicIP | sed \"s/.*\\\"\\([0-9].*\\)\\\".*/\\1/g\" )\n\n# In that output, we can see the public IP\n# Next we need the node port assignment of the applicationon the cluster.\n# It was automatically assigned by the kubernetes runtime\"\n\ncomment --pauseafter \"Notice the 'publicIP' field\"\n\ncomment \"Get the nodePort of the service\"\ndoit kubectl get svc guestbook -ojson\n\nSERVICE_PORT=$(cat out | grep nodePort | sed \"s/.*: *\\([0-9]*\\).*/\\1/g\")\n\ncomment --pauseafter \"Notice the 'nodePort' field\"\n\ncomment \"Curl it...\"\ndoit curl -s ${WORKER_IP}:${SERVICE_PORT}/hello\n\ncomment --pauseafter \"*** End of \"$(basename $0)\n"
  },
  {
    "path": "presentation/scripts/lab2-1.sh",
    "content": "#!/bin/bash\n\nsource ./common.sh\n\nSERVICE_PORT=$(kubectl get svc guestbook -ojson | grep nodePort | sed \"s/.*: *\\([0-9]*\\).*/\\1/g\")\nWORKER_IP=$(bx cs workers ${CLUSTER_NAME} --json  | grep publicIP | sed \"s/.*\\\"\\([0-9].*\\)\\\".*/\\1/g\" )\n\nGUESTBOOK_CURL=${WORKER_IP}:${SERVICE_PORT}/hello\n\n# make sure the service works\ncomment \"We're starting with the same output as the previous lab\"\ndoit curl -s ${GUESTBOOK_CURL}\n\ncomment we have the pods\ndoit kubectl get pods -l run=guestbook\n\n# these pods come from the deployment\n# kubectl get deployment ${DEPLOYMENT_NAME}\n\ncomment --nolf \"You should start this watch in a separate terminal\"\ncomment \"    watch -d -n 0.2 curl -s ${GUESTBOOK_CURL}\"\ncomment --pauseafter \"Press ENTER when ready\"\n\nreplicas=$(( ( RANDOM % 5 )  + 2 ))\ncomment --nocr --nohash --pauseafter \"How many replicas? \"\ncomment --nohash $replicas\n\n# use `kubectl scale`\ndoit kubectl scale deployment ${DEPLOYMENT_NAME} --replicas ${replicas}\n\n# show the pods\ndoit kubectl get pods -l run=guestbook\n\ncomment --pauseafter \"*** End of \"$(basename $0)\n"
  },
  {
    "path": "presentation/scripts/lab2-2.sh",
    "content": "#!/bin/bash\n\nsource ./common.sh\n\n# `kubectl set image` to change the underlying image to our v2 version\ncomment \"Update the deployment with the new image\"\ndoit kubectl set image deployment ${DEPLOYMENT_NAME} guestbook=${IMAGE_NAME}:v2\n\ndoit kubectl describe deployment ${DEPLOYMENT_NAME}\nline=$(grep Image out)\ncomment --nolf \"Notice where is shows:\"\ncomment \"$line\"\n\ncomment --pauseafter \"*** End of \"$(basename $0)\n"
  }
]