[
  {
    "path": ".devcontainer/Dockerfile",
    "content": "# syntax = docker/dockerfile:1.0-experimental\n\n#\n# This is the base dockerfile to be used with the BUILDKIT to build the \n# image that the .devcontainer docker image is based on\n# \nFROM registry.access.redhat.com/ubi8/openjdk-11:latest\n\nUSER root\n\n# add a reference to fedora repo to install packages not part of the\n# ubi8 repos\nCOPY assets/fedora.repo /etc/yum.repos.d/fedora.repo\n\nRUN microdnf install dnf \\\n# install a smattering of useful packages (some of which are used later in dockerfile such as wget, zsh, and git)\n    && dnf install -y skopeo wget jq iputils vi procps git \\\n# Install packages from fedora (outside unsubscribed ubi8)\n    && dnf -y install --enablerepo fedora zsh tree \\\n# Install necessary tools to run antora    \n    && dnf -y install npm && npm i -g @antora/cli@2.3 @antora/site-generator-default@2.3 && npm rm --global npx && npm install --global npx && npm install --global gulp \\\n# Install yum so that docker can be installed in the container\n    && dnf -y install yum && yum install -y yum-utils \\\n# install docker repo\n    && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \\\n# install docker client\n    && yum install -y docker-ce-cli  \\\n# make sure jboss user has rights to run docker\n    && usermod -aG docker jboss  \\\n# cleanup packages and yum\n    && yum remove -y yum-utils && yum clean all && dnf clean all && rm -r /var/cache/dnf\n\n# install specific version of yq (2.4.1)\nRUN wget https://github.com/mikefarah/yq/releases/download/2.4.1/yq_linux_amd64 -O /usr/bin/yq && \\\n    chmod +x /usr/bin/yq \n\n# install stern\nRUN cd /usr/local/bin && \\\n    wget https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 -O /usr/local/bin/stern && \\\n    chmod 755 /usr/local/bin/stern && \\\n# install hey\n    wget https://mwh-demo-assets.s3-ap-southeast-2.amazonaws.com/hey_linux_amd64 -O /usr/local/bin/hey && \\\n    chmod 755 
/usr/local/bin/hey\n\n# overwrite existing oc with the absolute newest version of the openshift client\nRUN curl -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux.tar.gz | \\\n    tar -xvzf - -C /usr/local/bin/ oc && chmod 755 /usr/local/bin/oc && ln -s /usr/local/bin/oc /usr/local/bin/kubectl\n\nUSER jboss\n\n# install and configure ohmyzsh for jboss user\nRUN wget https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | zsh\n\n# needed for krew commands\nENV PATH=\"$HOME/.krew/bin:$PATH\"\n\n# install kube ctx and kube ns via krew\nRUN ( set -x; cd \"$(mktemp -d)\" && \\\n  OS=\"$(uname | tr '[:upper:]' '[:lower:]')\" && \\\n  ARCH=\"$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\\(arm\\)\\(64\\)\\?.*/\\1\\2/' -e 's/aarch64$/arm64/')\" && \\\n  curl -fsSLO \"https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz\" && \\\n  tar zxvf krew.tar.gz && \\\n  KREW=./krew-\"${OS}_${ARCH}\" && \\\n  \"$KREW\" install krew ) &&\\\n  kubectl krew install ctx && kubectl krew install ns\n\n# Subdirectory where local-config files should reside (matched to gitignore to ensure no secrets are checked in)\nENV CONFIG_SUBDIR \"local-config\"\nENV DEMO_HOME \"/workspaces/kubernetes-tutorial/\"\n# Use VSCode with kubectl edit commands\nENV KUBE_EDITOR=\"code -w\"\n\n# this is done in the base image already (to support the demo shell images too), but for those that make\n# local changes to .zshrc they should not have to rebuild the base\nCOPY assets/.zshrc.example $HOME/.zshrc"
  },
  {
    "path": ".devcontainer/assets/.zshrc.example",
    "content": "# If you come from bash you might have to change your $PATH.\n# export PATH=$HOME/bin:/usr/local/bin:$PATH\n\n# Path to your oh-my-zsh installation.\nexport ZSH=\"$HOME/.oh-my-zsh\"\n\n# Set name of the theme to load --- if set to \"random\", it will\n# load a random theme each time oh-my-zsh is loaded, in which case,\n# to know which specific one was loaded, run: echo $RANDOM_THEME\n# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes\nZSH_THEME=\"robbyrussell\"\n\n# Set list of themes to pick from when loading at random\n# Setting this variable when ZSH_THEME=random will cause zsh to load\n# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/\n# If set to an empty array, this variable will have no effect.\n# ZSH_THEME_RANDOM_CANDIDATES=( \"robbyrussell\" \"agnoster\" )\n\n# Uncomment the following line to use case-sensitive completion.\n# CASE_SENSITIVE=\"true\"\n\n# Uncomment the following line to use hyphen-insensitive completion.\n# Case-sensitive completion must be off. 
_ and - will be interchangeable.\n# HYPHEN_INSENSITIVE=\"true\"\n\n# Uncomment the following line to disable bi-weekly auto-update checks.\n# DISABLE_AUTO_UPDATE=\"true\"\n\n# Uncomment the following line to automatically update without prompting.\n# DISABLE_UPDATE_PROMPT=\"true\"\n\n# Uncomment the following line to change how often to auto-update (in days).\n# export UPDATE_ZSH_DAYS=13\n\n# Uncomment the following line if pasting URLs and other text is messed up.\n# DISABLE_MAGIC_FUNCTIONS=true\n\n# Uncomment the following line to disable colors in ls.\n# DISABLE_LS_COLORS=\"true\"\n\n# Uncomment the following line to disable auto-setting terminal title.\n# DISABLE_AUTO_TITLE=\"true\"\n\n# Uncomment the following line to enable command auto-correction.\n# ENABLE_CORRECTION=\"true\"\n\n# Uncomment the following line to display red dots whilst waiting for completion.\n# COMPLETION_WAITING_DOTS=\"true\"\n\n# Uncomment the following line if you want to disable marking untracked files\n# under VCS as dirty. 
This makes repository status check for large repositories\n# much, much faster.\n# DISABLE_UNTRACKED_FILES_DIRTY=\"true\"\n\n# Uncomment the following line if you want to change the command execution time\n# stamp shown in the history command output.\n# You can set one of the optional three formats:\n# \"mm/dd/yyyy\"|\"dd.mm.yyyy\"|\"yyyy-mm-dd\"\n# or set a custom format using the strftime function format specifications,\n# see 'man strftime' for details.\n# HIST_STAMPS=\"mm/dd/yyyy\"\n\n# Would you like to use another custom folder than $ZSH/custom?\n# ZSH_CUSTOM=/path/to/new-custom-folder\n\n# Which plugins would you like to load?\n# Standard plugins can be found in ~/.oh-my-zsh/plugins/*\n# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/\n# Example format: plugins=(rails git textmate ruby lighthouse)\n# Add wisely, as too many plugins slow down shell startup.\nplugins=(git)\n\nsource $ZSH/oh-my-zsh.sh\n\n# User configuration\n\n# export MANPATH=\"/usr/local/man:$MANPATH\"\n\n# You may need to manually set your language environment\n# export LANG=en_US.UTF-8\n\n# Preferred editor for local and remote sessions\n# if [[ -n $SSH_CONNECTION ]]; then\n#   export EDITOR='vim'\n# else\n#   export EDITOR='mvim'\n# fi\n\n# Compilation flags\n# export ARCHFLAGS=\"-arch x86_64\"\n\n# Set personal aliases, overriding those provided by oh-my-zsh libs,\n# plugins, and themes. Aliases can be placed here, though oh-my-zsh\n# users are encouraged to define aliases within the ZSH_CUSTOM folder.\n# For a full list of active aliases, run `alias`.\n#\n# Example aliases\n# alias zshconfig=\"mate ~/.zshrc\"\n# alias ohmyzsh=\"mate ~/.oh-my-zsh\"\n#\n\nsource $DEMO_HOME/scripts/shell-setup.sh\nexport PATH=\"${KREW_ROOT:-$HOME/.krew}/bin:$PATH\"\n"
  },
  {
    "path": ".devcontainer/assets/copy-kube-config.sh",
    "content": "#!/bin/bash -i\n\n# set -euo pipefail\n\n# Copies localhost's ~/.kube/config file into the container and swaps out localhost\n# for host.docker.internal whenever a new shell starts to keep them in sync.\nif [ \"$SYNC_LOCALHOST_KUBECONFIG\" = \"true\" ] && [ -d \"/usr/local/share/kube-localhost\" ]; then\n    mkdir -p $HOME/.kube\n    cp -r /usr/local/share/kube-localhost/config $HOME/.kube/config\n    sed -i -e \"s/localhost/host.docker.internal/g\" $HOME/.kube/config\n    sed -i -e \"s/127.0.0.1/host.docker.internal/g\" $HOME/.kube/config\n\n    # If .minikube was mounted, set up client cert/key\n    if [ -d \"/usr/local/share/minikube-localhost\" ]; then\n        mkdir -p $HOME/.minikube\n        cp -r /usr/local/share/minikube-localhost/ca.crt $HOME/.minikube\n        # Location varies between versions of minikube\n        if [ -f \"/usr/local/share/minikube-localhost/client.crt\" ]; then\n            cp -r /usr/local/share/minikube-localhost/client.crt $HOME/.minikube\n            cp -r /usr/local/share/minikube-localhost/client.key $HOME/.minikube\n        elif [ -f \"/usr/local/share/minikube-localhost/profiles/${SYNC_MINIKUBE_PROFILE}/client.crt\" ]; then\n            cp -r /usr/local/share/minikube-localhost/profiles/${SYNC_MINIKUBE_PROFILE}/client.crt $HOME/.minikube\n            cp -r /usr/local/share/minikube-localhost/profiles/${SYNC_MINIKUBE_PROFILE}/client.key $HOME/.minikube\n        fi\n\n        # Point .kube/config to the correct location of the certs\n        sed -i -r \"s|(\\s*certificate-authority:\\s).*|\\\\1$HOME\\/.minikube\\/ca.crt|g\" $HOME/.kube/config\n        sed -i -r \"s|(\\s*client-certificate:\\s).*|\\\\1$HOME\\/.minikube\\/client.crt|g\" $HOME/.kube/config\n        sed -i -r \"s|(\\s*client-key:\\s).*|\\\\1$HOME\\/.minikube\\/client.key|g\" $HOME/.kube/config\n    fi\nfi"
  },
  {
    "path": ".devcontainer/assets/fedora.repo",
    "content": "[fedora]\nname = Fedora\nbaseurl = https://mirror.aarnet.edu.au/pub/fedora/linux/releases/34/Everything/x86_64/os/\ngpgcheck=0\nenabled=0"
  },
  {
    "path": ".devcontainer/assets/post-start.sh",
    "content": "#!/bin/bash\n\nWORKSPACE_FOLDER=$1\n\nrsync -a ${WORKSPACE_FOLDER}/.devcontainer/workspace-setup/ ${WORKSPACE_FOLDER}/.vscode/ --ignore-existing\n\n${WORKSPACE_FOLDER}/.devcontainer/assets/copy-kube-config.sh"
  },
  {
    "path": ".devcontainer/devcontainer.json",
    "content": "{\n\t\"name\": \"DevNation Kubernetes Tutorial\",\n\t\"dockerFile\": \"Dockerfile\",\n\t\"runArgs\": [\n\t\t\"-v\", \"/var/run/docker.sock.raw:/var/run/docker.sock\",\n\t\t\"-v\", \"${env:HOME}/.vs-kubernetes:/home/jboss/.vs-kubernetes\",\n\n\t\t// use local .oh-my-zsh configuration if it exists (overwriting one in container).\n\t\t// comment the following line out if you want to use local installation on container instead\n\t\t\"-v\", \"${env:HOME}/.oh-my-zsh:/home/jboss/.oh-my-zsh\",\n\t\t\"-v\", \"${env:HOME}/.helm:/home/jboss/.helm\",\n\t\t\"-v\", \"${env:HOME}/.ssh:/home/jboss/.ssh\",\n\t\t// mount the maven cache locally\n\t\t\"-v\", \"${env:HOME}/.m2/:/home/jboss/.m2\",\n\t\t// mount npm cache locally\n\t\t\"-v\", \"${env:HOME}/.npm:/home/jboss/.npm\",\n\n\t\t// This allows us to reach the minikube instance from within the docker container\n\t\t\"--network\", \"host\",\n\t\t\n\t\t// override dockerfile DEMO_HOME to whatever folder vscode considers the root folder in the container\n\t\t\"-e\", \"DEMO_HOME=${containerWorkspaceFolder}\",\n\t],\n\t\"mounts\":[\n\t\t\"source=${env:HOME}${env:USERPROFILE}/.kube,target=/usr/local/share/kube-localhost,type=bind\",\n\t\t\"source=${env:HOME}${env:USERPROFILE}/.minikube,target=/usr/local/share/minikube-localhost,type=bind\"\n\t],\n\t\"remoteEnv\": {\n\t\t\"SYNC_LOCALHOST_KUBECONFIG\": \"true\",\n\t\t\"SYNC_MINIKUBE_PROFILE\": \"devnation\",\n\t\t\"HOST_USER\": \"${env:USER}\"\n\t},\n\t\"extensions\": [\n\t\t\"vscjava.vscode-java-pack\",\n\t\t\"redhat.vscode-xml\",\n\t\t\"redhat.vscode-quarkus\",\n\t\t\"ggrebert.quarkus-snippets\",\n\t\t\"humao.rest-client\",\n\t\t\"asciidoctor.asciidoctor-vscode\",\n\t\t\"madhavd1.javadoc-tools\"\n\t],\n\t\"postStartCommand\": \"${containerWorkspaceFolder}/.devcontainer/assets/post-start.sh ${containerWorkspaceFolder}\",\n\t\"settings\":{\n\t\t\"terminal.integrated.shell.linux\": \"/bin/zsh\",\n\t\t\"editor.tabCompletion\": \"on\",\n\t\t\"java.home\": 
\"/usr/lib/jvm/java-11-openjdk\",\n\t\t\"workbench.colorTheme\": \"Solarized Light\",\n\t\t\"http.proxyStrictSSL\": false,\n\t\t\"workbench.tips.enabled\": false,\n\t\t\"xml.format.enabled\": true,\n\t\t// don't pull in the .m2 cache \n\t\t\"files.exclude\": {\n\t\t\t\"**/.classpath\": true,\n\t\t\t\"**/.project\": true,\n\t\t\t\"**/.settings\": true,\n\t\t\t\"**/.factorypath\": true,\n            \"**/.m2\": true,\n        },\n\t\t// Don't import the example-operator project\n\t\t// these exclusions don't work entirely as advertised.  \n\t\t// See: https://github.com/redhat-developer/vscode-java/issues/1698\n\t\t\"java.import.exclusions\": [\n\t\t\t//\"**/example-operator\",\n\t\t\t//\"example-operator/**\",\n\t\t\t\"**/.m2/**\",        \n\t\t\t\"**/node_modules/**\",\n\t\t\t\"**/.metadata/**\",\n\t\t\t\"**/archetype-resources/**\",\n\t\t\t\"**/META-INF/maven/**\"\n\t\t]\n\t}\n}\n"
  },
  {
    "path": ".devcontainer/workspace-setup/asciidoc.json.code-snippets",
    "content": "{\n  \"Add Tabs\": {\n    \"prefix\": \"tabs\",\n    \"body\": [\n      \"[tabs]\",\n      \"====\",\n      \"${1:tab1}::\",\n      \"+\",\n      \"--\",\n      \"--\",\n      \"${2:tab2}::\",\n      \"+\",\n      \"--\",\n      \"--\",\n      \"====\"\n    ],\n    \"description\": \"Add Tabs macro\"\n  },\n  \"Add Navigation\": {\n    \"prefix\": \"nav\",\n    \"body\": [\n      \"${1|*,**,***|} xref:${2:page.adoc}[${3:Nav Title}]\"\n    ],\n    \"description\": \"Add new navigation\"\n  },\n  \"Console Input\": {\n    \"prefix\": \"input\",\n    \"body\": [\n      \"[.console-input]\",\n      \"[source,${1:bash},subs=\\\"${2:+macros,+attributes}\\\"]\",\n      \"----\",\n      \"${3:echo \\\"Hello World\\\"}\",\n      \"----\"\n    ],\n    \"description\": \"Adds Console Input source fragment\"\n  },\n  \"Console Output\": {\n    \"prefix\": \"output\",\n    \"body\": [\n      \"[.console-output]\",\n      \"[source,${1:bash},subs=\\\"${2:+macros,+attributes}\\\"]\",\n      \"----\",\n      \"${3:\\\"Hello World\\\"}\",\n      \"----\"\n    ],\n    \"description\": \"Adds Console Output source fragment\"\n  },\n  \"Asciidoc Tag\": {\n    \"prefix\": \"atag\",\n    \"body\": [\n      \"// tag::${1:tag_name}[]\",\n      \"${2:body}\",\n      \"// end::${1:tag_name}[]\"\n    ]\n  },\n  \"Partial Tag Include\": {\n    \"prefix\": \"tinclude\",\n    \"body\": [\n      \"include::partial$${1:include_name}.adoc[tags=**;!*;${2:tags_to_include}]\"\n    ],\n    \"description\": \"Include a partial with tags\"\n  },\n  \"Add Console Tab\": {\n    \"prefix\": \"tconsole\",\n    \"body\": [\n      \"[tabs]\",\n      \"====\",\n      \"${1:tab1}::\",\n      \"+\",\n      \"--\",\n      \"[.console-${2:input}]\",\n      \"[source,${3:bash},subs=\\\"${4:+macros,+attributes}\\\"]\",\n      \"----\",\n      \"${5:echo \\\"Hello World\\\"}\",\n      \"----\",\n      \"--\",\n      \"====\"\n    ],\n    \"description\": \"Add Tabs macro\"\n  },\n}"
  },
  {
    "path": ".devcontainer/workspace-setup/launch.json",
    "content": "{\n    // Use IntelliSense to learn about possible attributes.\n    // Hover to view descriptions of existing attributes.\n    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n    \"version\": \"0.2.0\",\n    \"configurations\": [\n        {\n            \"type\": \"java\",\n            \"name\": \"Debug (Attach)\",\n            \"request\": \"attach\",\n            \"hostName\": \"localhost\",\n            \"port\": 5005\n        }\n    ]\n}"
  },
  {
    "path": ".editorconfig",
    "content": "root = true\n\n[*]\nindent_style = space\ncharset = utf-8\ntrim_trailing_whitespace = false\ninsert_final_newline = false\n\n[*.java]\nindent_style = space\nindent_size = 4\n\n[*.xml]\nindent_style = space\nindent_size = 2"
  },
  {
    "path": ".github/workflows/docs.yml",
    "content": "name: docs\n\non:\n  push:\n    branches: \n    - v1.29\n    - v1.34\n    paths:\n    - .github/workflows/docs.yml\n    - github-pages.yml\n    - 'documentation/**'\n\njobs:\n  build-and-publish:\n    runs-on: ubuntu-22.04 \n    steps:\n    - name: Checkout project\n      uses: actions/checkout@v4\n      with:\n        fetch-depth: 0\n    - name: Run antora\n      uses: docker://antora/antora:2.3.1\n      with:\n        args: github-pages.yml\n    - name: Deploy to GitHub Pages\n      uses: JamesIves/github-pages-deploy-action@releases/v4\n      with:\n        token: \"${{github.token}}\"\n        folder: gh-pages\n        branch: gh-pages\n        commit-message: \"[docs] Publishing the docs for commit(s) ${{github.sha}}\"\n"
  },
  {
    "path": ".github/workflows/helloworld-go.yml",
    "content": "name: helloworld-go\n\non:\n  push:\n    branches: \n    - master\n    paths:\n    - '.github/workflows/helloworld-go.yml'\n    - 'apps/helloworld/go/**'\n\njobs:\n  build:\n    runs-on: ubuntu-18.04\n    steps:\n    - name: Checkout project\n      uses: actions/checkout@v2\n    - name: Setup Go\n      uses: actions/setup-go@v2.0.3\n      with:\n        go-version: '1.14.2'\n    - name: Build Go app\n      working-directory: apps/helloworld/go\n      run: go build myrest.go\n"
  },
  {
    "path": ".github/workflows/helloworld-quarkus.yml",
    "content": "name: helloworld-quarkus\n\non:\n  push:\n    branches: \n    - master\n    paths:\n    - '.github/workflows/helloworld-quarkus.yml'\n    - 'apps/helloworld/quarkus/**'\n\njobs:\n  build:\n    runs-on: ubuntu-18.04\n    steps:\n    - name: Checkout project\n      uses: actions/checkout@v2\n    - name: Setup Java JDK\n      uses: actions/setup-java@v2\n      with:\n        distribution: \"temurin\"\n        java-version: 11\n    - name: Maven build\n      working-directory: apps/helloworld/quarkus\n      run: mvn package\n"
  },
  {
    "path": ".github/workflows/helloworld-spring-boot.yml",
    "content": "name: helloworld-spring-boot\n\non:\n  push:\n    branches: \n    - master\n    paths:\n    - '.github/workflows/helloworld-spring-boot.yml'\n    - 'apps/helloworld/springboot/**'\n\njobs:\n  build:\n    runs-on: ubuntu-18.04\n    steps:\n    - name: Checkout project\n      uses: actions/checkout@v2\n    - name: Setup Java JDK\n      uses: actions/setup-java@v2\n      with:\n        distribution: \"temurin\"\n        java-version: 11\n    - name: Maven build\n      working-directory: apps/helloworld/springboot\n      run: mvn package\n"
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store\ntarget\n*.iml\n.idea\n*.class\n*.log\n.cache\n/gh-pages\n/.cache\n*.swp\nnode_modules\n.classpath\n.project\n.settings\n.kube\n.minikube\n.vscode\nfirebase*\n.firebaserc\n.firebase\n\n# local kubernetes cluster info\nlocal-config/\n\n# once gulp is run, this file is generated.  Some debate whether this should be checked in or not\npackage-lock.json\nyarn.lock\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2020 Red Hat Inc.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.adoc",
    "content": "# Kubernetes Tutorial \n\nimage:https://github.com/redhat-developer-demos/kubernetes-tutorial/workflows/docs/badge.svg[]\nimage:https://github.com/redhat-developer-demos/kubernetes-tutorial/workflows/helloworld-go/badge.svg[]\nimage:https://github.com/redhat-developer-demos/kubernetes-tutorial/workflows/helloworld-spring-boot/badge.svg[]\nimage:https://github.com/redhat-developer-demos/kubernetes-tutorial/workflows/helloworld-quarkus/badge.svg[]\n\nYou can access the HTML version of this tutorial here: https://redhat-scholars.github.io/kubernetes-tutorial/\n\n## Visual Studio Code Remote Development\n\nIf you are using link:https://code.visualstudio.com/[Visual Studio Code] with the link:https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers[Remote Containers Extension], you don't need to install anything locally to be able to contribute or run through this tutorial.\n\nSimply follow these instructions:\n\n1. (Only if running with podman) Set the environment variable `DEVCONTAINER_TARGET_PREFIX=podman`\n2. Open VS Code from the root of the `kubernetes-tutorial` repository and when prompted indicate that you want to \"open the folder in a container\".\n\nOnce the devcontainer is initialized, from the Visual Studio Code terminal you will be able to run all the commands outlined for creating documentation.\n\n### Execute Tutorial with VSCode Remote\n\nYou can also run through the tutorial with VSCode Remote.  The only trick is that you will need to be able to access minikube from within your docker container.\n\n## Building the HTML locally\n\nIn the root of your git repository, run:\n\n```\nbin/build-site.sh\n```\n\nAnd then open your `gh-pages/index.html` file:\n\n```\nopen gh-pages/index.html\n```\n\n## Iterative local development\n\nYou can develop the tutorial docs locally using a rapid iterative cycle.\n\nFirst, install the `yarn` dependencies:\n\n[source,bash]\n----\nyarn install\n----\n\nAnd now start `gulp`. 
It will create the website and open your browser connected with `browser-sync`. Every time it detects a change, it will automatically refresh your browser page.\n\n[source,bash]\n----\ngulp\n----\n\nYou can clean the local cache using:\n\n[source,bash]\n----\ngulp clean\n----\n"
  },
  {
    "path": "apps/config/other.properties",
    "content": "DBCONN=jdbc:sqlserver://123.123.123.123:1433;user=MyUserName;password=*****;\nMSGBROKER=tcp://localhost:61616?jms.useAsyncSend=true"
  },
  {
    "path": "apps/config/some.properties",
    "content": "GREETING=jambo\nLOVE=Amour"
  },
  {
    "path": "apps/helloworld/go/Dockerfile",
    "content": "FROM registry.access.redhat.com/ubi8/ubi-minimal\nEXPOSE 8000\nCOPY myrest /usr/bin\nCMD /bin/sh -c '/usr/bin/myrest'\n\n"
  },
  {
    "path": "apps/helloworld/go/myrest.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n//\t\"time\"\n)\n\nfunc main() {\n\n\t//api := mux.NewRouter()\n\thttp.HandleFunc(\"/\", HelloHandler)\n\t//http.Handle(\"/hello\", api)\n    \n\tfmt.Println(\"Listening on localhost:8000\")\n\thttp.ListenAndServe(\":8000\", nil)\n}\n\nfunc HelloHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tfmt.Println(\"unable to get hostname\")\n\t}\n    \n\t// fmt.Fprintf(w, \"Hello from Go! %s on %s\\n\", time.Now(), hostname)\n\tfmt.Fprintf(w, \"Go Hello on %s\\n\", hostname)\n}\n"
  },
  {
    "path": "apps/helloworld/go/readme.txt",
    "content": "\nDownload and install go\nhttps://golang.org/dl/\n\ngo build myrest.go\n\nthen run the compiled executable\n./myrest\n\ncurl localhost:8000/hello\n\nctrl-c\n\nNote: go compiles to native and if you have been using a Mac/Windows\nyou likely need to recompile the binary\n\nenv GOOS=linux GOARCH=amd64 go build myrest.go\n\ndocker build -t burr/mygo:v1 .\n\ndocker run -it -p 8000:8000 burr/mygo:v1\n\nThank you to Jesus R who figured this out for me!\nhttps://github.com/jmrodri/go-demo\n\n\n"
  },
  {
    "path": "apps/helloworld/nodejs/.devcontainer/Dockerfile",
    "content": "FROM nodeshift/centos7-s2i-nodejs:10.x\n\nLABEL maintainer=\"Burr Sutter \\\"burrsutter@gmail.com\\\"\"\n\nEXPOSE 8000\n\nWORKDIR /opt/app-root/src\n\nCMD [\"npm\", \"start\"]\n"
  },
  {
    "path": "apps/helloworld/nodejs/.devcontainer/devcontainer.json",
    "content": "{\n\t\"name\": \"Node Sample\",\n\t\"dockerFile\": \"Dockerfile\",\n\t\"appPort\": \"8000\",\n\t \"extensions\": [\n\t\t// \"afractal.node-essentials\",\n\t\t\"visualstudioexptteam.vscodeintellicode\",\n\t\t\"ms-vscode.node-debug2\"\n\t ]\n}\n"
  },
  {
    "path": "apps/helloworld/nodejs/Dockerfile",
    "content": "FROM nodeshift/centos7-s2i-nodejs:10.x\n\nLABEL maintainer=\"Burr Sutter \\\"burrsutter@gmail.com\\\"\"\n\nEXPOSE 8000\n\nWORKDIR /opt/app-root/src\n\nCOPY hello-http.js .\nCOPY package.json .\n\nCMD [\"npm\", \"start\"]\n"
  },
  {
    "path": "apps/helloworld/nodejs/hello-http.js",
    "content": "const os = require('os');\nconst http = require('http');\nlet cnt = 0;\n\nhttp.createServer((req, res) =>\n{\n    // don't increment the counter if the favicon.ico is being requested\n    if (req.url.toLowerCase() === '/favicon.ico') {\n        res.writeHead(200, { 'Content-Type': 'image/x-icon' });\n        res.end();\n        console.log('favicon requested');\n        return;\n    }\n\n    res.end(`Node Bonjour on ${os.hostname()} ${cnt++} \\n`);\n}\n    \n).listen(8000);\n\nconsole.log(`Server running at http://localhost:8000/`);\n\n"
  },
  {
    "path": "apps/helloworld/nodejs/readme.txt",
    "content": "Test it plain\nnode -v\nv8.11.3\nnpm -v\nv8.11.3\n\n\nnpm start\ncurl localhost:8000\n\nTest it in minishift or minikube's Docker\nminishift docker-env\nminikube docker-env\n\n\ndocker build -f Dockerfile -t dev.local/burrsutter/mynode:v1 .\nor \ndocker build -f Dockerfile.openshift -t dev.local/burrsutter/mynode:v1 .\n\ndocker login docker.io\ndocker images | grep mynode\ndocker tag $1 docker.io/burrsutter/mynode:v1\ndocker push docker.io/burrsutter/mynode:v1\nor\ndocker login quay.io\ndocker images | grep mynode\ndocker tag $1 quay.io/burrsutter/mynode:v1\ndocker push quay.io/burrsutter/mynode:v1\n\n\n\nto test via Docker:\ndocker run --rm -d -p 8000:8000 dev.local/burrsutter/mynode:v1\n\ndocker ps | grep mynode\n\ndocker stop 08efa083696b\n\n"
  },
  {
    "path": "apps/helloworld/python/Dockerfile",
    "content": "FROM python:2\n\nWORKDIR /usr/src/app\n\nCOPY requirements.txt ./\n\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY . .\n\nEXPOSE 8000\n\nCMD [ \"python\", \"./app.py\" ]"
  },
  {
    "path": "apps/helloworld/python/app.py",
    "content": "import os\n\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef main():\n    return \"Python Hello on \" + os.getenv('HOSTNAME', \"unknown\") + \"\\n\"\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0',port='8000')\n\n"
  },
  {
    "path": "apps/helloworld/python/readme.txt",
    "content": "https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.9.pkg\n\npython --version\nPython 2.7.15\n\npip --version\npip 19.0.3\n\npip install --no-cache-dir -r requirements.txt\n\npython app.py\n\ncurl localhost:8000\nctrl-c\n\ndocker build -t burrsutter/flask_web_app .\n\ndocker run -it -p 8000:8000 --rm  burrsutter/flask_web_app\n\ncurl localhost:8000"
  },
  {
    "path": "apps/helloworld/python/requirements.txt",
    "content": "Flask==1.0.2"
  },
  {
    "path": "apps/helloworld/quarkus/.dockerignore",
    "content": "*\n!target/*-runner"
  },
  {
    "path": "apps/helloworld/quarkus/buildNativeLinux.sh",
    "content": "#!/bin/bash\n\nexport GRAALVM_HOME=~/tools/graalvm-ce-19.1.1/Contents/Home/\n\nmvn package -Pnative -Dnative-image.docker-build=true -DskipTests"
  },
  {
    "path": "apps/helloworld/quarkus/buildNativeMac.sh",
    "content": "#!/bin/bash\n\nexport GRAALVM_HOME=~/tools/graalvm-ce-19.1.1/Contents/Home/\n\n# Mac Native\nmvn package -Pnative\n\n"
  },
  {
    "path": "apps/helloworld/quarkus/build_push_docker.sh",
    "content": "#!/bin/bash\n\nIMAGE_VER=quarkus-demo:2.0.0\n\ndocker build -f kubefiles/Dockerfile -t dev.local/burrsutter/$IMAGE_VER .\ndocker login docker.io\ndocker tag dev.local/burrsutter/$IMAGE_VER docker.io/burrsutter/$IMAGE_VER\ndocker push docker.io/burrsutter/$IMAGE_VER\n"
  },
  {
    "path": "apps/helloworld/quarkus/build_push_quay.sh",
    "content": "#!/bin/bash\n\nIMAGE_VER=quarkus-demo:2.0.0\n\ndocker build -f kubefiles/Dockerfile -t dev.local/burrsutter/$IMAGE_VER .\ndocker login quay.io\ndocker tag dev.local/burrsutter/$IMAGE_VER quay.io/burrsutter/$IMAGE_VER\ndocker push quay.io/burrsutter/$IMAGE_VER\n"
  },
  {
    "path": "apps/helloworld/quarkus/dockerbuild.sh",
    "content": "#!/bin/bash\n\ndocker build -f kubefiles/Dockerfile -t dev.local/rhdevelopers/quarkus-demo:v2 ."
  },
  {
    "path": "apps/helloworld/quarkus/dockerbuild_openshift.sh",
    "content": "#!/bin/bash\n\ndocker build -f kubefiles/Dockerfile.openshift -t dev.local/rhdevelopers/quarkus-demo:v2 ."
  },
  {
    "path": "apps/helloworld/quarkus/dockerpush_docker.sh",
    "content": "#!/bin/bash\n\n# use docker images | grep quarkus to get the image ID for $1\n\ndocker login docker.io\n\ndocker tag $1 docker.io/burrsutter/quarkus-demo:2.0.0\n\ndocker push docker.io/burrsutter/quarkus-demo:2.0.0\n\n"
  },
  {
    "path": "apps/helloworld/quarkus/dockerpush_quay.sh",
    "content": "#!/bin/bash\n\n# use docker images | grep quarkus to get the image ID for $1\n\ndocker login quay.io\n\ndocker tag $1 quay.io/rhdevelopers/quarkus-demo:v2\n\ndocker push quay.io/rhdevelopers/quarkus-demo:v2\n\necho 'quay.io marks repositories as private by default'\necho 'to update https://screencast.com/t/uAooYnghlW'"
  },
  {
    "path": "apps/helloworld/quarkus/kubefiles/Deployment.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myquarkus\n  name: myquarkus\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myquarkus\n  template:\n    metadata:\n      labels:\n        app: myquarkus\n    spec:\n      containers:\n      - name: myquarkus\n        image: quay.io/rhdevelopers/quarkus-demo:v2\n        ports:\n          - containerPort: 8080\n        resources:\n          requests: \n            memory: \"50Mi\" \n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"50Mi\"\n            cpu: \"250m\" \n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /\n          initialDelaySeconds: 1\n          periodSeconds: 5\n          timeoutSeconds: 2          \n        readinessProbe:\n          httpGet:\n            path: /healthz\n            port: 8080\n          initialDelaySeconds: 1\n          periodSeconds: 3\n\n"
  },
  {
    "path": "apps/helloworld/quarkus/kubefiles/Deployment_quay.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myquarkus\n  name: myquarkus\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myquarkus\n  template:\n    metadata:\n      labels:\n        app: myquarkus\n    spec:\n      containers:\n      - name: myquarkus\n        image: quay.io/rhdevelopers/quarkus-demo:v2\n        ports:\n          - containerPort: 8080\n        resources:\n          requests: \n            memory: \"50Mi\" \n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"50Mi\"\n            cpu: \"250m\" \n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /\n          initialDelaySeconds: 1\n          periodSeconds: 5\n          timeoutSeconds: 2          \n        readinessProbe:\n          httpGet:\n            path: /healthz\n            port: 8080\n          initialDelaySeconds: 1\n          periodSeconds: 3\n\n"
  },
  {
    "path": "apps/helloworld/quarkus/kubefiles/Dockerfile",
    "content": "FROM registry.access.redhat.com/ubi8/ubi-minimal\nWORKDIR /work/\nCOPY target/*-runner /work/application\nRUN chmod 775 /work\nEXPOSE 8080\nCMD [\"./application\", \"-Xmx8m\", \"-Xmn8m\", \"-Xms8m\"]"
  },
  {
    "path": "apps/helloworld/quarkus/kubefiles/Dockerfile.openshift",
    "content": "FROM registry.access.redhat.com/ubi8/ubi-minimal\nWORKDIR /work/\nRUN chgrp -R 0 /work && \\\n    chmod -R g=u /work\nCOPY target/*-runner /work/application\nEXPOSE 8080\nUSER 1001\nENTRYPOINT [ \"./application\", \"-Xmx8m\", \"-Xmn8m\", \"-Xms8m\" ]\n"
  },
  {
    "path": "apps/helloworld/quarkus/kubefiles/Service.yml",
    "content": "apiVersion: v1\nkind: Service\nmetadata:\n  name: myquarkus\n  labels:\n    app: myquarkus    \nspec:\n  ports:\n  - name: http\n    port: 8080\n  selector:\n    app: myquarkus\n  type: LoadBalancer"
  },
  {
    "path": "apps/helloworld/quarkus/poller.sh",
    "content": "#!/bin/bash\n\nwhile true\ndo \n  curl $(minikube -p 9steps ip):$(kubectl get svc myquarkus -ojsonpath=\"{.spec.ports[?(@.port==8080)].nodePort}\")\n  sleep .2;\ndone\n\n"
  },
  {
    "path": "apps/helloworld/quarkus/pom.xml",
    "content": "<?xml version=\"1.0\"?>\n<project xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  <modelVersion>4.0.0</modelVersion>\n  <groupId>com.redhat.developer.demo</groupId>\n  <artifactId>quarkus-demo</artifactId>\n  <version>2.0.0</version>\n  <properties>\n    <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>\n    <surefire-plugin.version>2.22.0</surefire-plugin.version>\n    <quarkus.version>1.3.2.Final</quarkus.version>\n    <maven.compiler.source>1.8</maven.compiler.source>\n    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n    <maven.compiler.target>1.8</maven.compiler.target>\n  </properties>\n  <dependencyManagement>\n    <dependencies>\n      <dependency>\n        <groupId>io.quarkus</groupId>\n        <artifactId>quarkus-bom</artifactId>\n        <version>${quarkus.version}</version>\n        <type>pom</type>\n        <scope>import</scope>\n      </dependency>\n    </dependencies>\n  </dependencyManagement>\n  <dependencies>\n    <dependency>\n      <groupId>io.quarkus</groupId>\n      <artifactId>quarkus-resteasy</artifactId>\n    </dependency>\n    <dependency>\n      <groupId>io.quarkus</groupId>\n      <artifactId>quarkus-junit5</artifactId>\n      <scope>test</scope>\n    </dependency>\n    <dependency>\n      <groupId>io.rest-assured</groupId>\n      <artifactId>rest-assured</artifactId>\n      <scope>test</scope>\n    </dependency>\n  </dependencies>\n  <build>\n    <plugins>\n      <plugin>\n        <groupId>io.quarkus</groupId>\n        <artifactId>quarkus-maven-plugin</artifactId>\n        <version>${quarkus.version}</version>\n        <executions>\n          <execution>\n            <goals>\n              <goal>build</goal>\n            </goals>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n       
 <artifactId>maven-surefire-plugin</artifactId>\n        <version>${surefire-plugin.version}</version>\n        <configuration>\n          <systemProperties>\n            <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>\n          </systemProperties>\n        </configuration>\n      </plugin>\n    </plugins>\n  </build>\n  <profiles>\n    <profile>\n      <id>native</id>\n      <activation>\n        <property>\n          <name>native</name>\n        </property>\n      </activation>\n      <build>\n        <plugins>\n          <plugin>\n            <groupId>io.quarkus</groupId>\n            <artifactId>quarkus-maven-plugin</artifactId>\n            <version>${quarkus.version}</version>\n            <executions>\n              <execution>\n                <goals>\n                  <goal>native-image</goal>\n                </goals>\n                <configuration>\n                  <enableHttpUrlHandler>true</enableHttpUrlHandler>\n                </configuration>\n              </execution>\n            </executions>\n          </plugin>\n          <plugin>\n            <artifactId>maven-failsafe-plugin</artifactId>\n            <version>${surefire-plugin.version}</version>\n            <executions>\n              <execution>\n                <goals>\n                  <goal>integration-test</goal>\n                  <goal>verify</goal>\n                </goals>\n                <configuration>\n                  <systemProperties>\n                    <native.image.path>${project.build.directory}/${project.build.finalName}-runner</native.image.path>\n                  </systemProperties>\n                </configuration>\n              </execution>\n            </executions>\n          </plugin>\n        </plugins>\n      </build>\n    </profile>\n  </profiles>\n</project>\n"
  },
  {
    "path": "apps/helloworld/quarkus/readme.txt",
    "content": "\nmvn compile quarkus:dev\ncurl localhost:8080\nctrl-c \n\nmvn clean package\n\n./buildNativeLinux.sh\n\n./dockerbuild.sh\n\n\nkubectl apply -f kubefiles/Deployment.yml\nOR\nkubectl apply -f kubefiles/Deployment_quay.yml\n\nkubectl apply -f kubefiles/Service.yml\n\n./poller.sh\n\n\n"
  },
  {
    "path": "apps/helloworld/quarkus/src/main/java/com/redhat/developer/demo/GreetingEndpoint.java",
    "content": "package com.redhat.developer.demo;\n\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\n\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n\n@Path(\"/\") \npublic class GreetingEndpoint {\n    \n    private String prefix = \"Supersonic Subatomic Java with Quarkus\";\n    \n    private String HOSTNAME =\n       System.getenv().getOrDefault(\"HOSTNAME\", \"unknown\");\n\n    private int count = 0;\n\n    @GET    \n    @Produces(MediaType.TEXT_PLAIN)\n    public String greet() {\n        count++;\n        return prefix + \" \" + HOSTNAME + \":\" + count + \"\\n\";\n    }\n\n    @GET\n    @Path(\"/healthz\")\n    @Produces(MediaType.TEXT_PLAIN)\n    public String health() {\n        return \"OK\";\n    }\n    \n    @GET\n    @Path(\"/myresources\") \n    public String getSystemResources() {\n         long memory = Runtime.getRuntime().maxMemory();\n         int cores = Runtime.getRuntime().availableProcessors();\n         System.out.println(\"/myresources \" + HOSTNAME);\n         return \n             \" Memory: \" + (memory / 1024 / 1024) +\n             \" Cores: \" + cores + \"\\n\";\n    }\n    \n    @GET\n    @Path(\"/consume\") \n    public String consumeSome() {\n        System.out.println(\"/consume \" + HOSTNAME);\n\n        Runtime rt = Runtime.getRuntime();\n        StringBuilder sb = new StringBuilder();\n        long maxMemory = rt.maxMemory();\n        long usedMemory = 0;\n        // while usedMemory is less than 80% of Max\n        while (((float) usedMemory / maxMemory) < 0.80) {\n            sb.append(System.nanoTime() + sb.toString());\n            usedMemory = rt.totalMemory();\n        }\n        String msg = \"Allocated about 80% (\" + humanReadableByteCount(usedMemory, false) + \") of the max allowed JVM memory size (\"\n            + humanReadableByteCount(maxMemory, false) + \")\";\n        System.out.println(msg);\n        return msg + \"\\n\";\n    }\n\n   public static String humanReadableByteCount(long 
bytes, boolean si) {\n      int unit = si ? 1000 : 1024;\n      if (bytes < unit)\n        return bytes + \" B\";\n      int exp = (int) (Math.log(bytes) / Math.log(unit));\n      String pre = (si ? \"kMGTPE\" : \"KMGTPE\").charAt(exp - 1) + (si ? \"\" : \"i\");\n      return String.format(\"%.1f %sB\", bytes / Math.pow(unit, exp), pre);  \n   }\n\n}"
  },
  {
    "path": "apps/helloworld/springboot/.devcontainer/Dockerfile",
    "content": "FROM openjdk:8u151\nENV JAVA_APP_JAR boot-demo-1.0.0.jar\n\n## Ensure maven is installed\nRUN apt-get update -y && apt-get install maven -y\n\nWORKDIR /app/\nEXPOSE 8080\nCMD java -XX:+PrintFlagsFinal -XX:+PrintGCDetails $JAVA_OPTIONS -jar $JAVA_APP_JAR\n"
  },
  {
    "path": "apps/helloworld/springboot/.devcontainer/devcontainer.json",
    "content": "{\n\t\"name\": \"Spring Boot Sample\",\n\t\"dockerFile\": \"Dockerfile\",\n\t\"appPort\": \"8080\",\n\t \"extensions\": [\n\t \t\"vscjava.vscode-java-pack\",\n\t\t \"redhat.vscode-xml\"\n\t ]\n}\n"
  },
  {
    "path": "apps/helloworld/springboot/Dockerfile",
    "content": "FROM openjdk:17.0-slim\nENV JAVA_APP_JAR boot-demo-1.0.0.jar\nWORKDIR /app/\nCOPY target/$JAVA_APP_JAR .\nEXPOSE 8080\nCMD java $JAVA_OPTIONS -jar $JAVA_APP_JAR"
  },
  {
    "path": "apps/helloworld/springboot/Dockerfile.openshift",
    "content": "FROM registry.access.redhat.com/ubi8/openjdk-17-runtime\nWORKDIR /work/\nENV JAVA_APP_JAR boot-demo-1.0.0.jar\n# the following is not needed on this Red Hat created image\n# RUN chgrp -R 0 /work && \\ \n#    chmod -R g=u /work\nCOPY target/$JAVA_APP_JAR .\nEXPOSE 8080\nUSER 1001\nCMD java $JAVA_OPTIONS -jar $JAVA_APP_JAR\n"
  },
  {
    "path": "apps/helloworld/springboot/Dockerfile_Java11",
    "content": "FROM openjdk:11-jre\nENV JAVA_APP_JAR boot-demo-1.0.0.jar\nWORKDIR /app/\nCOPY target/$JAVA_APP_JAR .\nEXPOSE 8080\nCMD java -jar $JAVA_APP_JAR\n"
  },
  {
    "path": "apps/helloworld/springboot/Dockerfile_Memory",
    "content": "FROM openjdk:8u151-jre\nENV JAVA_APP_JAR boot-demo-1.0.0.jar\nWORKDIR /app/\nCOPY target/$JAVA_APP_JAR .\nEXPOSE 8080\nCMD java -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -XX:+PrintFlagsFinal -XX:+PrintGCDetails $JAVA_OPTIONS -jar $JAVA_APP_JAR\n"
  },
  {
    "path": "apps/helloworld/springboot/Dockerfile_Memory2",
    "content": "FROM openjdk:8u131-jre\nENV JAVA_APP_JAR boot-demo-1.0.0.jar\nWORKDIR /app/\nCOPY target/$JAVA_APP_JAR .\nEXPOSE 8080\nCMD java -Xmx112M -XX:+PrintFlagsFinal -XX:+PrintGCDetails $JAVA_OPTIONS -jar $JAVA_APP_JAR\n"
  },
  {
    "path": "apps/helloworld/springboot/build_push_docker.sh",
    "content": "#!/bin/bash\n\nIMAGE_VER=boot-demo:1.0.0\n\ndocker build -f Dockerfile -t dev.local/burrsutter/$IMAGE_VER .\ndocker login docker.io\ndocker tag dev.local/burrsutter/$IMAGE_VER docker.io/burrsutter/$IMAGE_VER\ndocker push docker.io/burrsutter/$IMAGE_VER\n"
  },
  {
    "path": "apps/helloworld/springboot/build_push_quay.sh",
    "content": "#!/bin/bash\n\nIMAGE_VER=boot-demo:1.0.0\n\ndocker build -f Dockerfile -t dev.local/burrsutter/$IMAGE_VER .\ndocker login quay.io\ndocker tag dev.local/burrsutter/$IMAGE_VER quay.io/burrsutter/$IMAGE_VER\ndocker push quay.io/burrsutter/$IMAGE_VER\n"
  },
  {
    "path": "apps/helloworld/springboot/pom.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n\t<modelVersion>4.0.0</modelVersion>\n\n\t<groupId>com.burrsutter</groupId>\n\t<artifactId>boot-demo</artifactId>\n\t<version>1.0.0</version>\n\t<packaging>jar</packaging>\n\n\t<name>helloboot</name>\n\t<description>Demo project for Spring Boot</description>\n\n\t<parent>\n\t\t<groupId>org.springframework.boot</groupId>\n\t\t<artifactId>spring-boot-starter-parent</artifactId>\n\t\t<version>3.3.4</version>\n\t\t<relativePath/> <!-- lookup parent from repository -->\n\t</parent>\n\n\t<properties>\n\t\t<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n\t\t<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>\n\t\t<java.version>17</java.version>\n\t</properties>\n\n\t<dependencies>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-web</artifactId>\n\t\t</dependency>\n\n        <dependency>\n            <groupId>org.springframework.boot</groupId>\n            <artifactId>spring-boot-devtools</artifactId>\n            <scope>runtime</scope>\n        </dependency>\n\t\t<dependency>\n\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t<artifactId>spring-boot-starter-test</artifactId>\n\t\t\t<scope>test</scope>\n\t\t</dependency>\n\t</dependencies>\n\n\t<build>\n\t\t<plugins>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.springframework.boot</groupId>\n\t\t\t\t<artifactId>spring-boot-maven-plugin</artifactId>\n\t\t\t</plugin>\n\t\t\t<plugin>\n\t\t\t\t<groupId>org.apache.maven.plugins</groupId>\n\t\t\t\t<artifactId>maven-jar-plugin</artifactId>\n\t\t\t\t<version>3.3.0</version>\n\t\t\t</plugin>\n\t\t</plugins>\n\t</build>\n\n\n</project>"
  },
  {
    "path": "apps/helloworld/springboot/readme.txt",
    "content": "Initial pom.xml created by start.spring.io\n\nmvn clean compile package\n\njava -jar target/boot-demo-1.0.0.jar\nor\nmvn spring-boot:run\ncurl http://localhost:8080/\nctrl-c\n\nManual Deployment\n\nexport IMAGE_VER=boot-demo:1.0.0\n\ndocker build -f Dockerfile -t dev.local/burrsutter/$IMAGE_VER .\ndocker login docker.io\ndocker tag dev.local/burrsutter/$IMAGE_VER docker.io/burrsutter/$IMAGE_VER\ndocker push docker.io/burrsutter/$IMAGE_VER\n\nor\n\ndocker build -f Dockerfile -t dev.local/burrsutter/$IMAGE_VER .\ndocker login quay.io\ndocker tag dev.local/burrsutter/$IMAGE_VER quay.io/burrsutter/$IMAGE_VER\ndocker push quay.io/burrsutter/$IMAGE_VER\n\nor \ndocker build -f Dockerfile.openshift -t dev.local/burrsutter/$IMAGE_VER .\n"
  },
  {
    "path": "apps/helloworld/springboot/src/main/java/com/burrsutter/HellobootApplication.java",
    "content": "package com.burrsutter;\n\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n@SpringBootApplication\npublic class HellobootApplication {\n\n\tpublic static void main(String[] args) {\n\t\tSpringApplication.run(HellobootApplication.class, args);\n\t}\n}\n"
  },
  {
    "path": "apps/helloworld/springboot/src/main/java/com/burrsutter/MyRESTController.java",
    "content": "package com.burrsutter;\n\nimport java.io.FileWriter;\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.core.env.Environment;\nimport org.springframework.http.HttpStatus;\nimport org.springframework.http.ResponseEntity;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RestController;\nimport org.springframework.web.client.RestTemplate;\n\n@RestController\npublic class MyRESTController {\n     @Autowired\n     private Environment environment;\n\n     final String hostname = System.getenv().getOrDefault(\"HOSTNAME\", \"unknown\");\n     String greeting;\n\n     private int count = 0; // simple counter to see lifecycle\n     boolean behave = true;\n     boolean dead = false;\n\n     RestTemplate restTemplate = new RestTemplate();\n\n   @GetMapping(\"/appendgreetingfile\")\n   public ResponseEntity<String> appendGreetingToFile() throws IOException {\n     \n       try(final FileWriter fileWriter = new FileWriter(\"/tmp/demo/greeting.txt\", true)) {\n          fileWriter.append(environment.getProperty(\"GREETING\",\"Jambo\"));\n          fileWriter.close();\n       }\n       return ResponseEntity.status(HttpStatus.CREATED).build();\n   } \n\n\n   @GetMapping(\"/readgreetingfile\")\n   public String readGreetingFile() throws IOException {\n        return new String(Files.readAllBytes(Paths.get(\"/tmp/demo/greeting.txt\")));\n   }\n\n   @GetMapping(\"/\")\n   public String sayHello() {\n       greeting = environment.getProperty(\"GREETING\",\"Jambo\");\n       count++;\n       System.out.println(greeting + \" from \" + hostname + \" \" + count);\n       return greeting + \" from Spring Boot! 
\" + count + \" on \" + hostname + \"\\n\";\n   }\n\n   @GetMapping(\"/sysresources\")\n   public String getSystemResources() {\n        long memory = Runtime.getRuntime().maxMemory();\n        int cores = Runtime.getRuntime().availableProcessors();\n        System.out.println(\"/sysresources \" + hostname);\n        return \n            \" Memory: \" + (memory / 1024 / 1024) +\n            \" Cores: \" + cores + \"\\n\";\n   }\n\n   @GetMapping(\"/consume\")\n   public String consumeSome() {\n        System.out.println(\"/consume \" + hostname);\n\n        Runtime rt = Runtime.getRuntime();\n        StringBuilder sb = new StringBuilder();\n        long maxMemory = rt.maxMemory();\n        long usedMemory = 0;\n        // while usedMemory is less than 80% of Max\n        while (((float) usedMemory / maxMemory) < 0.80) {\n            sb.append(System.nanoTime() + sb.toString());\n            usedMemory = rt.totalMemory();\n        }\n        String msg = \"Allocated about 80% (\" + humanReadableByteCount(usedMemory, false) + \") of the max allowed JVM memory size (\"\n            + humanReadableByteCount(maxMemory, false) + \")\";\n        System.out.println(msg);\n        return msg + \"\\n\";\n   }\n\n   @GetMapping(\"/health\")\n   public ResponseEntity<String> health() {               \n        if (behave) {\n          return ResponseEntity.status(HttpStatus.OK)\n          .body(\"I am fine, thank you\\n\");     \n        } else {             \n          return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE).body(\"Bad\");          \n        }\n   }\n\n   @GetMapping(\"/misbehave\")\n   public ResponseEntity<String> misbehave() {\n        behave = false;\n        return ResponseEntity.status(HttpStatus.OK).body(\"Misbehaving\");\n   }\n\n   @GetMapping(\"/behave\")\n   public ResponseEntity<String> behave() {\n        behave = true;\n        return ResponseEntity.status(HttpStatus.OK).body(\"Ain't Misbehaving\");\n   }\n\n   @GetMapping(\"/shot\")\n   
public ResponseEntity<String> shot() {\n        dead = true;\n        return ResponseEntity.status(HttpStatus.OK).body(\"I have been shot in the head\");\n        // https://www.quora.com/Why-can-zombies-only-die-by-being-shot-in-the-head-Why-can-they-survive-all-the-blood-loss-and-still-live-If-zombies-were-real-anyway\n   }\n\n   @GetMapping(\"/reborn\")\n   public ResponseEntity<String> reborn() {\n        dead = false;\n        return ResponseEntity.status(HttpStatus.OK).body(\"I have been reborn\");\n        // https://www.quora.com/Why-can-zombies-only-die-by-being-shot-in-the-head-Why-can-they-survive-all-the-blood-loss-and-still-live-If-zombies-were-real-anyway\n   }\n\n   @GetMapping(\"/alive\")\n   public ResponseEntity<String> alive() {\n    if (!dead) {\n      return ResponseEntity.status(HttpStatus.OK)\n      .body(\"It's Alive! (Frankenstein)\\n\");     \n    } else {             \n      return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE).body(\"All dead, not mostly dead (Princess Bride)\");\n    }\n}\n\n\n\n   @GetMapping(\"/configure\")\n   public String configure() {\n        String databaseConn = environment.getProperty(\"DBCONN\",\"Default\");\n        String msgBroker = environment.getProperty(\"MSGBROKER\",\"Default\");\n        greeting = environment.getProperty(\"GREETING\",\"Default\");\n        String love = environment.getProperty(\"LOVE\",\"Default\");\n        return \"Configuration for : \" + hostname + \"\\n\" \n            + \"databaseConn=\" + databaseConn + \"\\n\"\n            + \"msgBroker=\" + msgBroker + \"\\n\"\n            + \"greeting=\" + greeting + \"\\n\"\n            + \"love=\" + love + \"\\n\";\n   }\n\n   @GetMapping(\"/callinganother\")\n   public String callinganother() {\n        \n        // <servicename>.<namespace>.svc.cluster.local\n        String url = \"http://mynode.yourspace.svc.cluster.local:8000/\";\n\n        ResponseEntity<String> response\n        = restTemplate.getForEntity(url, 
String.class);\n    \n        String responseBody =  response.getBody();\n        System.out.println(responseBody);\n\n        return responseBody;\n   }\n\n   public static String humanReadableByteCount(long bytes, boolean si) {\n        int unit = si ? 1000 : 1024;\n        if (bytes < unit)\n            return bytes + \" B\";\n        int exp = (int) (Math.log(bytes) / Math.log(unit));\n        String pre = (si ? \"kMGTPE\" : \"KMGTPE\").charAt(exp - 1) + (si ? \"\" : \"i\");\n        return String.format(\"%.1f %sB\", bytes / Math.pow(unit, exp), pre);\n    }\n\n}"
  },
  {
    "path": "apps/kubefiles/demo-dynamic-persistent.yaml",
    "content": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: myboot-volumeclaim\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Mi"
  },
  {
    "path": "apps/kubefiles/demo-ingress-2.yaml",
    "content": "apiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n  name: example-ingress\n  annotations:\n    nginx.ingress.kubernetes.io/rewrite-target: /$1\nspec:\n  rules:\n  - host: kube-devnation.info\n    http:\n      paths:\n      - path: /\n        backend:\n          serviceName: quarkus-demo-deployment\n          servicePort: 8080\n      - path: /v2\n        backend:\n          serviceName: mynode-deployment\n          servicePort: 8000"
  },
  {
    "path": "apps/kubefiles/demo-ingress.yaml",
    "content": "apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: example-ingress\n  annotations:\n    nginx.ingress.kubernetes.io/rewrite-target: /$1\nspec:\n  rules:\n  - host: kube-devnation.info\n    http:\n      paths:\n      - pathType: Prefix\n        path: /\n        backend:\n          service: \n            name: quarkus-demo-deployment\n            port:\n              number: 8080"
  },
  {
    "path": "apps/kubefiles/demo-persistent-volume-hostpath.yaml",
    "content": "kind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: my-persistent-volume\n  labels:\n    type: local\nspec:\n  storageClassName: pv-demo \n  capacity:\n    storage: 100Mi\n  accessModes:\n    - ReadWriteOnce\n  hostPath:\n    path: \"/mnt/persistent-volume\"\n"
  },
  {
    "path": "apps/kubefiles/demo-persistent-volume-local.yaml",
    "content": "apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: my-persistent-volume\nspec:\n  capacity:\n    storage: 10Mi\n  volumeMode: Filesystem\n  accessModes:\n  - ReadWriteOnce\n  storageClassName: pv-demo \n  local:\n    path: \"/tmp\"\n  nodeAffinity:\n    required:\n      nodeSelectorTerms:\n      - matchExpressions:\n        - key: kubernetes.io/hostname\n          operator: In\n          values:\n          - ip-10-0-138-222\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-bad-image.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboo:v1\n        ports:\n          - containerPort: 8080"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-configuration-secret.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080\n        volumeMounts:          \n          - name: mysecretvolume #<.>\n            mountPath: /mystuff/secretstuff\n            readOnly: true\n        resources:\n          requests: \n            memory: \"300Mi\" \n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n      volumes:\n        - name: mysecretvolume #<.>\n          secret:\n            secretName: mysecret\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-configuration.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1  \n        ports:\n          - containerPort: 8080\n        envFrom:\n        - configMapRef:\n            name: my-config\n        resources:\n          requests: \n            memory: \"300Mi\" \n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-live-ready-aggressive.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n        env: dev\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n        resources:\n          requests:\n            memory: \"300Mi\"\n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /alive\n          periodSeconds: 2\n          timeoutSeconds: 2\n          failureThreshold: 2\n        readinessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n          periodSeconds: 3"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-live-ready.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n        env: dev\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n        resources:\n          requests:\n            memory: \"300Mi\"\n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /alive\n          initialDelaySeconds: 10\n          periodSeconds: 5\n          timeoutSeconds: 2\n        readinessProbe:\n          httpGet:  \n            path: /health\n            port: 8080\n          initialDelaySeconds: 10\n          periodSeconds: 3\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-resources-limits-v2.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot-next\n  name: myboot-next\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot-next\n  template:\n    metadata:\n      labels:\n        app: myboot-next\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v3\n        ports:\n          - containerPort: 8080\n        resources:\n          requests: \n            memory: \"300Mi\" \n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"900Mi\"\n            cpu: \"1000m\" # 1 core\n\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-resources-limits.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080\n        resources:\n          requests: \n            memory: \"400Mi\" \n            cpu: \"250m\" # 1/4 core\n          # NOTE: These are the same limits we tested our Docker Container with earlier\n          # -m matches limits.memory and --cpus matches limits.cpu\n          limits:\n            memory: \"600Mi\"\n            cpu: \"1000m\" # 1 core\n\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-resources.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080\n        resources:\n          requests: \n            memory: \"300Mi\" \n            cpu: \"10000m\" # 10 cores\n\n"
  },
  {
    "path": "apps/kubefiles/myboot-deployment-startup-live-ready.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n        env: dev\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n        resources:\n          requests:\n            memory: \"300Mi\"\n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /alive\n          periodSeconds: 2\n          timeoutSeconds: 2\n          failureThreshold: 2\n        readinessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n          periodSeconds: 3\n        startupProbe:\n          httpGet:\n            path: /alive\n            port: 8080\n          failureThreshold: 6\n          periodSeconds: 5\n          timeoutSeconds: 1"
  },
  {
    "path": "apps/kubefiles/myboot-deployment.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080\n"
  },
  {
    "path": "apps/kubefiles/myboot-node-affinity.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n            - matchExpressions:\n              - key: color\n                operator: In\n                values:\n                - blue\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080"
  },
  {
    "path": "apps/kubefiles/myboot-persistent-volume-claim.yaml",
    "content": "kind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: myboot-volumeclaim\nspec:\n  storageClassName: pv-demo \n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Mi\n"
  },
  {
    "path": "apps/kubefiles/myboot-pod-affinity.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot2\n  name: myboot2\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot2\n  template:\n    metadata:\n      labels:\n        app: myboot2\n    spec:\n      affinity:\n        podAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n          - topologyKey: kubernetes.io/hostname\n            labelSelector: \n              matchExpressions:\n              - key: app\n                operator: In\n                values:\n                - myboot\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080"
  },
  {
    "path": "apps/kubefiles/myboot-pod-antiaffinity.yaml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot3\n  name: myboot3\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot3\n  template:\n    metadata:\n      labels:\n        app: myboot3\n    spec:\n      affinity:\n        podAntiAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n          - topologyKey: kubernetes.io/hostname\n            labelSelector: \n              matchExpressions:\n              - key: app\n                operator: In\n                values:\n                - myboot\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080"
  },
  {
    "path": "apps/kubefiles/myboot-pod-volume-hostpath.yaml",
    "content": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo\n    image: quay.io/rhdevelopers/myboot:v4\n    \n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  volumes:\n  - name: demo-volume\n    hostPath: #<.> \n      path: \"/mnt/data\" #<.>\n"
  },
  {
    "path": "apps/kubefiles/myboot-pod-volume-pvc.yaml",
    "content": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo\n    image: quay.io/rhdevelopers/myboot:v4\n    \n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  volumes:\n  - name: demo-volume\n    persistentVolumeClaim:\n      claimName: myboot-volumeclaim\n"
  },
  {
    "path": "apps/kubefiles/myboot-pod-volume.yml",
    "content": "apiVersion: v1\nkind: Pod #<.>\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo\n    image: quay.io/rhdevelopers/myboot:v4\n    \n    volumeMounts:\n    - mountPath: /tmp/demo #<.>\n      name: demo-volume #<.> \n\n  volumes:\n  - name: demo-volume\n    emptyDir: {}\n"
  },
  {
    "path": "apps/kubefiles/myboot-pods-volume.yml",
    "content": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo-1 #<.>\n    image: quay.io/rhdevelopers/myboot:v4\n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  - name: myboot-demo-2 #<.>\n    image: quay.io/rhdevelopers/myboot:v4 #<.>\n\n    env:\n    - name: SERVER_PORT #<.>\n      value: \"8090\"\n\n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  volumes:\n  - name: demo-volume #<.>\n    emptyDir: {}\n"
  },
  {
    "path": "apps/kubefiles/myboot-service.yml",
    "content": "apiVersion: v1\nkind: Service\nmetadata:\n  name: myboot\n  labels:\n    app: myboot    \nspec:\n  ports:\n  - name: http\n    port: 8080\n  selector:\n    app: myboot\n  type: LoadBalancer"
  },
  {
    "path": "apps/kubefiles/myboot-toleration.yaml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      tolerations:\n      - key: \"color\"\n        operator: \"Equal\"\n        value: \"blue\"\n        effect: \"NoSchedule\"\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080\n\n"
  },
  {
    "path": "apps/kubefiles/mykafka.yml",
    "content": "apiVersion: kafka.strimzi.io/v1alpha1\nkind: Kafka\nmetadata: \n  name: my-cluster\nspec:\n  kafka:\n    replicas: 3\n    listeners:\n      external:\n        type: nodeport\n    storage:\n      type: ephemeral\n  zookeeper:\n    replicas: 3\n    storage:\n      type: ephemeral\n  entityOperator:\n    topicOperator: {}"
  },
  {
    "path": "apps/kubefiles/quarkus-daemonset.yaml",
    "content": "apiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: quarkus-daemonset\n  labels:\n    app: quarkus-daemonset\nspec:\n  selector:\n    matchLabels:\n      app: quarkus-daemonset\n  template:\n    metadata:\n      labels:\n        app: quarkus-daemonset\n    spec:\n      containers:\n      - name: quarkus-daemonset\n        image: quay.io/rhdevelopers/quarkus-demo:v1"
  },
  {
    "path": "apps/kubefiles/quarkus-statefulset-external-svc.yaml",
    "content": "apiVersion: v1\nkind: Service\nmetadata:\n  name: quarkus-statefulset-2\nspec:\n  type: LoadBalancer #<.>\n  externalTrafficPolicy: Local #<.>\n  selector:\n    statefulset.kubernetes.io/pod-name: quarkus-statefulset-2 #<.>\n  ports:\n  - port: 8080\n    name: web"
  },
  {
    "path": "apps/kubefiles/quarkus-statefulset.yaml",
    "content": "apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: quarkus-statefulset\n  labels:\n    app: quarkus-statefulset\nspec:\n  selector:\n    matchLabels:\n      app: quarkus-statefulset\n  serviceName: \"quarkus\"\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: quarkus-statefulset\n    spec:\n      containers:\n      - name: quarkus-statefulset\n        image: quay.io/rhdevelopers/quarkus-demo:v1\n        ports:\n        - containerPort: 8080\n          name: web\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: quarkus\n  labels:\n    app: quarkus-statefulset\nspec:\n  ports:\n  - port: 8080\n    name: web\n  clusterIP: None\n  selector:\n    app: quarkus-statefulset\n---"
  },
  {
    "path": "apps/kubefiles/whalesay-cronjob.yaml",
    "content": "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: whale-say-cronjob\nspec:\n  schedule: \"* * * * *\" #<.>\n  jobTemplate:                   \n    spec:                        \n      template:    \n        metadata:\n          labels:\n            job-type: whale-say #<.>              \n        spec:\n          containers:\n          - name: whale-say-container\n            image: docker/whalesay\n            command: [\"cowsay\",\"Hello DevNation\"]\n          restartPolicy: Never"
  },
  {
    "path": "apps/kubefiles/whalesay-job.yaml",
    "content": "apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: whale-say-job #<.>\nspec:\n  template:\n    spec:\n      containers:\n      - name: whale-say-container\n        image: docker/whalesay\n        command: [\"cowsay\",\"Hello DevNation\"]\n      restartPolicy: Never"
  },
  {
    "path": "apps/pizza-operator/.dockerignore",
    "content": "*\n!target/*-runner\n!target/*-runner.jar\n!target/lib/*"
  },
  {
    "path": "apps/pizza-operator/.gitignore",
    "content": "# Eclipse\n.project\n.classpath\n.settings/\nbin/\n\n# IntelliJ\n.idea\n*.ipr\n*.iml\n*.iws\n\n# NetBeans\nnb-configuration.xml\n\n# Visual Studio Code\n.vscode\n.factorypath\n\n# OSX\n.DS_Store\n\n# Vim\n*.swp\n*.swo\n\n# patch\n*.orig\n*.rej\n\n# Maven\ntarget/\npom.xml.tag\npom.xml.releaseBackup\npom.xml.versionsBackup\nrelease.properties"
  },
  {
    "path": "apps/pizza-operator/.mvn/wrapper/MavenWrapperDownloader.java",
    "content": "/*\n * Copyright 2007-present the original author or authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport java.net.*;\nimport java.io.*;\nimport java.nio.channels.*;\nimport java.util.Properties;\n\npublic class MavenWrapperDownloader {\n\n    private static final String WRAPPER_VERSION = \"0.5.6\";\n    /**\n     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.\n     */\n    private static final String DEFAULT_DOWNLOAD_URL = \"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/\"\n        + WRAPPER_VERSION + \"/maven-wrapper-\" + WRAPPER_VERSION + \".jar\";\n\n    /**\n     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to\n     * use instead of the default one.\n     */\n    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =\n            \".mvn/wrapper/maven-wrapper.properties\";\n\n    /**\n     * Path where the maven-wrapper.jar will be saved to.\n     */\n    private static final String MAVEN_WRAPPER_JAR_PATH =\n            \".mvn/wrapper/maven-wrapper.jar\";\n\n    /**\n     * Name of the property which should be used to override the default download url for the wrapper.\n     */\n    private static final String PROPERTY_NAME_WRAPPER_URL = \"wrapperUrl\";\n\n    public static void main(String args[]) {\n        System.out.println(\"- Downloader started\");\n        File baseDirectory = new File(args[0]);\n        
System.out.println(\"- Using base directory: \" + baseDirectory.getAbsolutePath());\n\n        // If the maven-wrapper.properties exists, read it and check if it contains a custom\n        // wrapperUrl parameter.\n        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);\n        String url = DEFAULT_DOWNLOAD_URL;\n        if(mavenWrapperPropertyFile.exists()) {\n            FileInputStream mavenWrapperPropertyFileInputStream = null;\n            try {\n                mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);\n                Properties mavenWrapperProperties = new Properties();\n                mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);\n                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);\n            } catch (IOException e) {\n                System.out.println(\"- ERROR loading '\" + MAVEN_WRAPPER_PROPERTIES_PATH + \"'\");\n            } finally {\n                try {\n                    if(mavenWrapperPropertyFileInputStream != null) {\n                        mavenWrapperPropertyFileInputStream.close();\n                    }\n                } catch (IOException e) {\n                    // Ignore ...\n                }\n            }\n        }\n        System.out.println(\"- Downloading from: \" + url);\n\n        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);\n        if(!outputFile.getParentFile().exists()) {\n            if(!outputFile.getParentFile().mkdirs()) {\n                System.out.println(\n                        \"- ERROR creating output directory '\" + outputFile.getParentFile().getAbsolutePath() + \"'\");\n            }\n        }\n        System.out.println(\"- Downloading to: \" + outputFile.getAbsolutePath());\n        try {\n            downloadFileFromURL(url, outputFile);\n            System.out.println(\"Done\");\n            System.exit(0);\n        } 
catch (Throwable e) {\n            System.out.println(\"- Error downloading\");\n            e.printStackTrace();\n            System.exit(1);\n        }\n    }\n\n    private static void downloadFileFromURL(String urlString, File destination) throws Exception {\n        if (System.getenv(\"MVNW_USERNAME\") != null && System.getenv(\"MVNW_PASSWORD\") != null) {\n            String username = System.getenv(\"MVNW_USERNAME\");\n            char[] password = System.getenv(\"MVNW_PASSWORD\").toCharArray();\n            Authenticator.setDefault(new Authenticator() {\n                @Override\n                protected PasswordAuthentication getPasswordAuthentication() {\n                    return new PasswordAuthentication(username, password);\n                }\n            });\n        }\n        URL website = new URL(urlString);\n        ReadableByteChannel rbc;\n        rbc = Channels.newChannel(website.openStream());\n        FileOutputStream fos = new FileOutputStream(destination);\n        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);\n        fos.close();\n        rbc.close();\n    }\n\n}\n"
  },
  {
    "path": "apps/pizza-operator/.mvn/wrapper/maven-wrapper.properties",
    "content": "distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip\nwrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar\n"
  },
  {
    "path": "apps/pizza-operator/README.md",
    "content": "# pizza-operator project\n\nThis project uses Quarkus, the Supersonic Subatomic Java Framework.\n\nIf you want to learn more about Quarkus, please visit its website: https://quarkus.io/ .\n\n## Running the application in dev mode\n\nYou can run your application in dev mode that enables live coding using:\n```\n./mvnw quarkus:dev\n```\n\n## Packaging and running the application\n\nThe application can be packaged using `./mvnw package`.\nIt produces the `pizza-operator-1.0.0-SNAPSHOT-runner.jar` file in the `/target` directory.\nBe aware that it’s not an _über-jar_ as the dependencies are copied into the `target/lib` directory.\n\nThe application is now runnable using `java -jar target/pizza-operator-1.0.0-SNAPSHOT-runner.jar`.\n\n## Creating a native executable\n\nYou can create a native executable using: `./mvnw package -Pnative`.\n\nOr, if you don't have GraalVM installed, you can run the native executable build in a container using: `./mvnw package -Pnative -Dquarkus.native.container-build=true`.\n\nYou can then execute your native executable with: `./target/pizza-operator-1.0.0-SNAPSHOT-runner`\n\nIf you want to learn more about building native executables, please consult https://quarkus.io/guides/building-native-image."
  },
  {
    "path": "apps/pizza-operator/mvnw",
    "content": "#!/bin/sh\n# ----------------------------------------------------------------------------\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# ----------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------\n# Maven Start Up Batch script\n#\n# Required ENV vars:\n# ------------------\n#   JAVA_HOME - location of a JDK home dir\n#\n# Optional ENV vars\n# -----------------\n#   M2_HOME - location of maven2's installed home dir\n#   MAVEN_OPTS - parameters passed to the Java VM when running Maven\n#     e.g. to debug Maven itself, use\n#       set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000\n#   MAVEN_SKIP_RC - flag to disable loading of mavenrc files\n# ----------------------------------------------------------------------------\n\nif [ -z \"$MAVEN_SKIP_RC\" ] ; then\n\n  if [ -f /etc/mavenrc ] ; then\n    . /etc/mavenrc\n  fi\n\n  if [ -f \"$HOME/.mavenrc\" ] ; then\n    . \"$HOME/.mavenrc\"\n  fi\n\nfi\n\n# OS specific support.  
$var _must_ be set to either true or false.\ncygwin=false;\ndarwin=false;\nmingw=false\ncase \"`uname`\" in\n  CYGWIN*) cygwin=true ;;\n  MINGW*) mingw=true;;\n  Darwin*) darwin=true\n    # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home\n    # See https://developer.apple.com/library/mac/qa/qa1170/_index.html\n    if [ -z \"$JAVA_HOME\" ]; then\n      if [ -x \"/usr/libexec/java_home\" ]; then\n        export JAVA_HOME=\"`/usr/libexec/java_home`\"\n      else\n        export JAVA_HOME=\"/Library/Java/Home\"\n      fi\n    fi\n    ;;\nesac\n\nif [ -z \"$JAVA_HOME\" ] ; then\n  if [ -r /etc/gentoo-release ] ; then\n    JAVA_HOME=`java-config --jre-home`\n  fi\nfi\n\nif [ -z \"$M2_HOME\" ] ; then\n  ## resolve links - $0 may be a link to maven's home\n  PRG=\"$0\"\n\n  # need this for relative symlinks\n  while [ -h \"$PRG\" ] ; do\n    ls=`ls -ld \"$PRG\"`\n    link=`expr \"$ls\" : '.*-> \\(.*\\)$'`\n    if expr \"$link\" : '/.*' > /dev/null; then\n      PRG=\"$link\"\n    else\n      PRG=\"`dirname \"$PRG\"`/$link\"\n    fi\n  done\n\n  saveddir=`pwd`\n\n  M2_HOME=`dirname \"$PRG\"`/..\n\n  # make it fully qualified\n  M2_HOME=`cd \"$M2_HOME\" && pwd`\n\n  cd \"$saveddir\"\n  # echo Using m2 at $M2_HOME\nfi\n\n# For Cygwin, ensure paths are in UNIX format before anything is touched\nif $cygwin ; then\n  [ -n \"$M2_HOME\" ] &&\n    M2_HOME=`cygpath --unix \"$M2_HOME\"`\n  [ -n \"$JAVA_HOME\" ] &&\n    JAVA_HOME=`cygpath --unix \"$JAVA_HOME\"`\n  [ -n \"$CLASSPATH\" ] &&\n    CLASSPATH=`cygpath --path --unix \"$CLASSPATH\"`\nfi\n\n# For Mingw, ensure paths are in UNIX format before anything is touched\nif $mingw ; then\n  [ -n \"$M2_HOME\" ] &&\n    M2_HOME=\"`(cd \"$M2_HOME\"; pwd)`\"\n  [ -n \"$JAVA_HOME\" ] &&\n    JAVA_HOME=\"`(cd \"$JAVA_HOME\"; pwd)`\"\nfi\n\nif [ -z \"$JAVA_HOME\" ]; then\n  javaExecutable=\"`which javac`\"\n  if [ -n \"$javaExecutable\" ] && ! 
[ \"`expr \\\"$javaExecutable\\\" : '\\([^ ]*\\)'`\" = \"no\" ]; then\n    # readlink(1) is not available as standard on Solaris 10.\n    readLink=`which readlink`\n    if [ ! `expr \"$readLink\" : '\\([^ ]*\\)'` = \"no\" ]; then\n      if $darwin ; then\n        javaHome=\"`dirname \\\"$javaExecutable\\\"`\"\n        javaExecutable=\"`cd \\\"$javaHome\\\" && pwd -P`/javac\"\n      else\n        javaExecutable=\"`readlink -f \\\"$javaExecutable\\\"`\"\n      fi\n      javaHome=\"`dirname \\\"$javaExecutable\\\"`\"\n      javaHome=`expr \"$javaHome\" : '\\(.*\\)/bin'`\n      JAVA_HOME=\"$javaHome\"\n      export JAVA_HOME\n    fi\n  fi\nfi\n\nif [ -z \"$JAVACMD\" ] ; then\n  if [ -n \"$JAVA_HOME\"  ] ; then\n    if [ -x \"$JAVA_HOME/jre/sh/java\" ] ; then\n      # IBM's JDK on AIX uses strange locations for the executables\n      JAVACMD=\"$JAVA_HOME/jre/sh/java\"\n    else\n      JAVACMD=\"$JAVA_HOME/bin/java\"\n    fi\n  else\n    JAVACMD=\"`which java`\"\n  fi\nfi\n\nif [ ! -x \"$JAVACMD\" ] ; then\n  echo \"Error: JAVA_HOME is not defined correctly.\" >&2\n  echo \"  We cannot execute $JAVACMD\" >&2\n  exit 1\nfi\n\nif [ -z \"$JAVA_HOME\" ] ; then\n  echo \"Warning: JAVA_HOME environment variable is not set.\"\nfi\n\nCLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher\n\n# traverses directory structure from process work directory to filesystem root\n# first directory with .mvn subdirectory is considered project base directory\nfind_maven_basedir() {\n\n  if [ -z \"$1\" ]\n  then\n    echo \"Path not specified to find_maven_basedir\"\n    return 1\n  fi\n\n  basedir=\"$1\"\n  wdir=\"$1\"\n  while [ \"$wdir\" != '/' ] ; do\n    if [ -d \"$wdir\"/.mvn ] ; then\n      basedir=$wdir\n      break\n    fi\n    # workaround for JBEAP-8937 (on Solaris 10/Sparc)\n    if [ -d \"${wdir}\" ]; then\n      wdir=`cd \"$wdir/..\"; pwd`\n    fi\n    # end of workaround\n  done\n  echo \"${basedir}\"\n}\n\n# concatenates all lines of a file\nconcat_lines() {\n  
if [ -f \"$1\" ]; then\n    echo \"$(tr -s '\\n' ' ' < \"$1\")\"\n  fi\n}\n\nBASE_DIR=`find_maven_basedir \"$(pwd)\"`\nif [ -z \"$BASE_DIR\" ]; then\n  exit 1;\nfi\n\n##########################################################################################\n# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central\n# This allows using the maven wrapper in projects that prohibit checking in binary data.\n##########################################################################################\nif [ -r \"$BASE_DIR/.mvn/wrapper/maven-wrapper.jar\" ]; then\n    if [ \"$MVNW_VERBOSE\" = true ]; then\n      echo \"Found .mvn/wrapper/maven-wrapper.jar\"\n    fi\nelse\n    if [ \"$MVNW_VERBOSE\" = true ]; then\n      echo \"Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ...\"\n    fi\n    if [ -n \"$MVNW_REPOURL\" ]; then\n      jarUrl=\"$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar\"\n    else\n      jarUrl=\"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar\"\n    fi\n    while IFS=\"=\" read key value; do\n      case \"$key\" in (wrapperUrl) jarUrl=\"$value\"; break ;;\n      esac\n    done < \"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties\"\n    if [ \"$MVNW_VERBOSE\" = true ]; then\n      echo \"Downloading from: $jarUrl\"\n    fi\n    wrapperJarPath=\"$BASE_DIR/.mvn/wrapper/maven-wrapper.jar\"\n    if $cygwin; then\n      wrapperJarPath=`cygpath --path --windows \"$wrapperJarPath\"`\n    fi\n\n    if command -v wget > /dev/null; then\n        if [ \"$MVNW_VERBOSE\" = true ]; then\n          echo \"Found wget ... 
using wget\"\n        fi\n        if [ -z \"$MVNW_USERNAME\" ] || [ -z \"$MVNW_PASSWORD\" ]; then\n            wget \"$jarUrl\" -O \"$wrapperJarPath\"\n        else\n            wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD \"$jarUrl\" -O \"$wrapperJarPath\"\n        fi\n    elif command -v curl > /dev/null; then\n        if [ \"$MVNW_VERBOSE\" = true ]; then\n          echo \"Found curl ... using curl\"\n        fi\n        if [ -z \"$MVNW_USERNAME\" ] || [ -z \"$MVNW_PASSWORD\" ]; then\n            curl -o \"$wrapperJarPath\" \"$jarUrl\" -f\n        else\n            curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o \"$wrapperJarPath\" \"$jarUrl\" -f\n        fi\n\n    else\n        if [ \"$MVNW_VERBOSE\" = true ]; then\n          echo \"Falling back to using Java to download\"\n        fi\n        javaClass=\"$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java\"\n        # For Cygwin, switch paths to Windows format before running javac\n        if $cygwin; then\n          javaClass=`cygpath --path --windows \"$javaClass\"`\n        fi\n        if [ -e \"$javaClass\" ]; then\n            if [ ! 
-e \"$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class\" ]; then\n                if [ \"$MVNW_VERBOSE\" = true ]; then\n                  echo \" - Compiling MavenWrapperDownloader.java ...\"\n                fi\n                # Compiling the Java class\n                (\"$JAVA_HOME/bin/javac\" \"$javaClass\")\n            fi\n            if [ -e \"$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class\" ]; then\n                # Running the downloader\n                if [ \"$MVNW_VERBOSE\" = true ]; then\n                  echo \" - Running MavenWrapperDownloader.java ...\"\n                fi\n                (\"$JAVA_HOME/bin/java\" -cp .mvn/wrapper MavenWrapperDownloader \"$MAVEN_PROJECTBASEDIR\")\n            fi\n        fi\n    fi\nfi\n##########################################################################################\n# End of extension\n##########################################################################################\n\nexport MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-\"$BASE_DIR\"}\nif [ \"$MVNW_VERBOSE\" = true ]; then\n  echo $MAVEN_PROJECTBASEDIR\nfi\nMAVEN_OPTS=\"$(concat_lines \"$MAVEN_PROJECTBASEDIR/.mvn/jvm.config\") $MAVEN_OPTS\"\n\n# For Cygwin, switch paths to Windows format before running java\nif $cygwin; then\n  [ -n \"$M2_HOME\" ] &&\n    M2_HOME=`cygpath --path --windows \"$M2_HOME\"`\n  [ -n \"$JAVA_HOME\" ] &&\n    JAVA_HOME=`cygpath --path --windows \"$JAVA_HOME\"`\n  [ -n \"$CLASSPATH\" ] &&\n    CLASSPATH=`cygpath --path --windows \"$CLASSPATH\"`\n  [ -n \"$MAVEN_PROJECTBASEDIR\" ] &&\n    MAVEN_PROJECTBASEDIR=`cygpath --path --windows \"$MAVEN_PROJECTBASEDIR\"`\nfi\n\n# Provide a \"standardized\" way to retrieve the CLI args that will\n# work with both Windows and non-Windows executions.\nMAVEN_CMD_LINE_ARGS=\"$MAVEN_CONFIG $@\"\nexport MAVEN_CMD_LINE_ARGS\n\nWRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain\n\nexec \"$JAVACMD\" \\\n  $MAVEN_OPTS \\\n  -classpath 
\"$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar\" \\\n  \"-Dmaven.home=${M2_HOME}\" \"-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}\" \\\n  ${WRAPPER_LAUNCHER} $MAVEN_CONFIG \"$@\"\n"
  },
  {
    "path": "apps/pizza-operator/mvnw.cmd",
    "content": "@REM ----------------------------------------------------------------------------\n@REM Licensed to the Apache Software Foundation (ASF) under one\n@REM or more contributor license agreements.  See the NOTICE file\n@REM distributed with this work for additional information\n@REM regarding copyright ownership.  The ASF licenses this file\n@REM to you under the Apache License, Version 2.0 (the\n@REM \"License\"); you may not use this file except in compliance\n@REM with the License.  You may obtain a copy of the License at\n@REM\n@REM    http://www.apache.org/licenses/LICENSE-2.0\n@REM\n@REM Unless required by applicable law or agreed to in writing,\n@REM software distributed under the License is distributed on an\n@REM \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n@REM KIND, either express or implied.  See the License for the\n@REM specific language governing permissions and limitations\n@REM under the License.\n@REM ----------------------------------------------------------------------------\n\n@REM ----------------------------------------------------------------------------\n@REM Maven Start Up Batch script\n@REM\n@REM Required ENV vars:\n@REM JAVA_HOME - location of a JDK home dir\n@REM\n@REM Optional ENV vars\n@REM M2_HOME - location of maven2's installed home dir\n@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands\n@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending\n@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven\n@REM     e.g. 
to debug Maven itself, use\n@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000\n@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files\n@REM ----------------------------------------------------------------------------\n\n@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'\n@echo off\n@REM set title of command window\ntitle %0\n@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'\n@if \"%MAVEN_BATCH_ECHO%\" == \"on\"  echo %MAVEN_BATCH_ECHO%\n\n@REM set %HOME% to equivalent of $HOME\nif \"%HOME%\" == \"\" (set \"HOME=%HOMEDRIVE%%HOMEPATH%\")\n\n@REM Execute a user defined script before this one\nif not \"%MAVEN_SKIP_RC%\" == \"\" goto skipRcPre\n@REM check for pre script, once with legacy .bat ending and once with .cmd ending\nif exist \"%HOME%\\mavenrc_pre.bat\" call \"%HOME%\\mavenrc_pre.bat\"\nif exist \"%HOME%\\mavenrc_pre.cmd\" call \"%HOME%\\mavenrc_pre.cmd\"\n:skipRcPre\n\n@setlocal\n\nset ERROR_CODE=0\n\n@REM To isolate internal variables from possible post scripts, we use another setlocal\n@setlocal\n\n@REM ==== START VALIDATION ====\nif not \"%JAVA_HOME%\" == \"\" goto OkJHome\n\necho.\necho Error: JAVA_HOME not found in your environment. >&2\necho Please set the JAVA_HOME variable in your environment to match the >&2\necho location of your Java installation. >&2\necho.\ngoto error\n\n:OkJHome\nif exist \"%JAVA_HOME%\\bin\\java.exe\" goto init\n\necho.\necho Error: JAVA_HOME is set to an invalid directory. >&2\necho JAVA_HOME = \"%JAVA_HOME%\" >&2\necho Please set the JAVA_HOME variable in your environment to match the >&2\necho location of your Java installation. >&2\necho.\ngoto error\n\n@REM ==== END VALIDATION ====\n\n:init\n\n@REM Find the project base dir, i.e. 
the directory that contains the folder \".mvn\".\n@REM Fallback to current working directory if not found.\n\nset MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%\nIF NOT \"%MAVEN_PROJECTBASEDIR%\"==\"\" goto endDetectBaseDir\n\nset EXEC_DIR=%CD%\nset WDIR=%EXEC_DIR%\n:findBaseDir\nIF EXIST \"%WDIR%\"\\.mvn goto baseDirFound\ncd ..\nIF \"%WDIR%\"==\"%CD%\" goto baseDirNotFound\nset WDIR=%CD%\ngoto findBaseDir\n\n:baseDirFound\nset MAVEN_PROJECTBASEDIR=%WDIR%\ncd \"%EXEC_DIR%\"\ngoto endDetectBaseDir\n\n:baseDirNotFound\nset MAVEN_PROJECTBASEDIR=%EXEC_DIR%\ncd \"%EXEC_DIR%\"\n\n:endDetectBaseDir\n\nIF NOT EXIST \"%MAVEN_PROJECTBASEDIR%\\.mvn\\jvm.config\" goto endReadAdditionalConfig\n\n@setlocal EnableExtensions EnableDelayedExpansion\nfor /F \"usebackq delims=\" %%a in (\"%MAVEN_PROJECTBASEDIR%\\.mvn\\jvm.config\") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a\n@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%\n\n:endReadAdditionalConfig\n\nSET MAVEN_JAVA_EXE=\"%JAVA_HOME%\\bin\\java.exe\"\nset WRAPPER_JAR=\"%MAVEN_PROJECTBASEDIR%\\.mvn\\wrapper\\maven-wrapper.jar\"\nset WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain\n\nset DOWNLOAD_URL=\"https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar\"\n\nFOR /F \"tokens=1,2 delims==\" %%A IN (\"%MAVEN_PROJECTBASEDIR%\\.mvn\\wrapper\\maven-wrapper.properties\") DO (\n    IF \"%%A\"==\"wrapperUrl\" SET DOWNLOAD_URL=%%B\n)\n\n@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central\n@REM This allows using the maven wrapper in projects that prohibit checking in binary data.\nif exist %WRAPPER_JAR% (\n    if \"%MVNW_VERBOSE%\" == \"true\" (\n        echo Found %WRAPPER_JAR%\n    )\n) else (\n    if not \"%MVNW_REPOURL%\" == \"\" (\n        SET DOWNLOAD_URL=\"%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar\"\n    )\n    if \"%MVNW_VERBOSE%\" == \"true\" (\n        echo Couldn't find %WRAPPER_JAR%, downloading 
it ...\n        echo Downloading from: %DOWNLOAD_URL%\n    )\n\n    powershell -Command \"&{\"^\n\t\t\"$webclient = new-object System.Net.WebClient;\"^\n\t\t\"if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {\"^\n\t\t\"$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');\"^\n\t\t\"}\"^\n\t\t\"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')\"^\n\t\t\"}\"\n    if \"%MVNW_VERBOSE%\" == \"true\" (\n        echo Finished downloading %WRAPPER_JAR%\n    )\n)\n@REM End of extension\n\n@REM Provide a \"standardized\" way to retrieve the CLI args that will\n@REM work with both Windows and non-Windows executions.\nset MAVEN_CMD_LINE_ARGS=%*\n\n%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% \"-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%\" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*\nif ERRORLEVEL 1 goto error\ngoto end\n\n:error\nset ERROR_CODE=1\n\n:end\n@endlocal & set ERROR_CODE=%ERROR_CODE%\n\nif not \"%MAVEN_SKIP_RC%\" == \"\" goto skipRcPost\n@REM check for post script, once with legacy .bat ending and once with .cmd ending\nif exist \"%HOME%\\mavenrc_post.bat\" call \"%HOME%\\mavenrc_post.bat\"\nif exist \"%HOME%\\mavenrc_post.cmd\" call \"%HOME%\\mavenrc_post.cmd\"\n:skipRcPost\n\n@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'\nif \"%MAVEN_BATCH_PAUSE%\" == \"on\" pause\n\nif \"%MAVEN_TERMINATE_CMD%\" == \"on\" exit %ERROR_CODE%\n\nexit /B %ERROR_CODE%\n"
  },
  {
    "path": "apps/pizza-operator/pom.xml",
    "content": "<?xml version=\"1.0\"?>\n<project xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  <modelVersion>4.0.0</modelVersion>\n  <groupId>org.acme</groupId>\n  <artifactId>pizza-operator</artifactId>\n  <version>1.0.0-SNAPSHOT</version>\n  <properties>\n    <compiler-plugin.version>3.8.1</compiler-plugin.version>\n    <maven.compiler.parameters>true</maven.compiler.parameters>\n    <maven.compiler.source>11</maven.compiler.source>\n    <maven.compiler.target>11</maven.compiler.target>\n    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n    <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>\n    <quarkus-plugin.version>1.5.0.Final</quarkus-plugin.version>\n    <quarkus.platform.artifact-id>quarkus-universe-bom</quarkus.platform.artifact-id>\n    <quarkus.platform.group-id>io.quarkus</quarkus.platform.group-id>\n    <quarkus.platform.version>1.5.0.Final</quarkus.platform.version>\n    <surefire-plugin.version>2.22.1</surefire-plugin.version>\n  </properties>\n  <dependencyManagement>\n    <dependencies>\n      <dependency>\n        <groupId>${quarkus.platform.group-id}</groupId>\n        <artifactId>${quarkus.platform.artifact-id}</artifactId>\n        <version>${quarkus.platform.version}</version>\n        <type>pom</type>\n        <scope>import</scope>\n      </dependency>\n    </dependencies>\n  </dependencyManagement>\n  <dependencies>\n    <dependency>\n      <groupId>io.quarkus</groupId>\n      <artifactId>quarkus-resteasy</artifactId>\n    </dependency>\n    <dependency>\n      <groupId>io.quarkus</groupId>\n      <artifactId>quarkus-junit5</artifactId>\n      <scope>test</scope>\n    </dependency>\n    <dependency>\n      <groupId>io.rest-assured</groupId>\n      <artifactId>rest-assured</artifactId>\n      <scope>test</scope>\n    </dependency>\n  
  <dependency>\n      <groupId>io.quarkus</groupId>\n      <artifactId>quarkus-kubernetes-client</artifactId>\n    </dependency>\n  </dependencies>\n  <build>\n    <plugins>\n      <plugin>\n        <groupId>io.quarkus</groupId>\n        <artifactId>quarkus-maven-plugin</artifactId>\n        <version>${quarkus-plugin.version}</version>\n        <executions>\n          <execution>\n            <goals>\n              <goal>build</goal>\n            </goals>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n        <artifactId>maven-compiler-plugin</artifactId>\n        <version>${compiler-plugin.version}</version>\n      </plugin>\n      <plugin>\n        <artifactId>maven-surefire-plugin</artifactId>\n        <version>${surefire-plugin.version}</version>\n        <configuration>\n          <systemPropertyVariables>\n            <java.util.logging.manager>org.jboss.logmanager.LogManager</java.util.logging.manager>\n          </systemPropertyVariables>\n        </configuration>\n      </plugin>\n    </plugins>\n  </build>\n  <profiles>\n    <profile>\n      <id>native</id>\n      <activation>\n        <property>\n          <name>native</name>\n        </property>\n      </activation>\n      <build>\n        <plugins>\n          <plugin>\n            <artifactId>maven-failsafe-plugin</artifactId>\n            <version>${surefire-plugin.version}</version>\n            <executions>\n              <execution>\n                <goals>\n                  <goal>integration-test</goal>\n                  <goal>verify</goal>\n                </goals>\n                <configuration>\n                  <systemPropertyVariables>\n                    <native.image.path>${project.build.directory}/${project.build.finalName}-runner</native.image.path>\n                  </systemPropertyVariables>\n                </configuration>\n              </execution>\n            </executions>\n          </plugin>\n        </plugins>\n      </build>\n      
<properties>\n        <quarkus.package.type>native</quarkus.package.type>\n      </properties>\n    </profile>\n  </profiles>\n</project>\n"
  },
  {
    "path": "apps/pizza-operator/src/main/docker/Dockerfile.jvm",
    "content": "####\n# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode\n#\n# Before building the docker image run:\n#\n# mvn package\n#\n# Then, build the image with:\n#\n# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/pizza-operator-jvm .\n#\n# Then run the container using:\n#\n# docker run -i --rm -p 8080:8080 quarkus/pizza-operator-jvm\n#\n# If you want to include the debug port into your docker image\n# you will have to expose the debug port (default 5005) like this :  EXPOSE 8080 5050\n# \n# Then run the container using : \n#\n# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_ENABLE_DEBUG=\"true\" quarkus/pizza-operator-jvm\n#\n###\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.1\n\nARG JAVA_PACKAGE=java-11-openjdk-headless\nARG RUN_JAVA_VERSION=1.3.8\n\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n\n# Install java and the run-java script\n# Also set up permissions for user `1001`\nRUN microdnf install curl ca-certificates ${JAVA_PACKAGE} \\\n    && microdnf update \\\n    && microdnf clean all \\\n    && mkdir /deployments \\\n    && chown 1001 /deployments \\\n    && chmod \"g+rwX\" /deployments \\\n    && chown 1001:root /deployments \\\n    && curl https://repo1.maven.org/maven2/io/fabric8/run-java-sh/${RUN_JAVA_VERSION}/run-java-sh-${RUN_JAVA_VERSION}-sh.sh -o /deployments/run-java.sh \\\n    && chown 1001 /deployments/run-java.sh \\\n    && chmod 540 /deployments/run-java.sh \\\n    && echo \"securerandom.source=file:/dev/urandom\" >> /etc/alternatives/jre/lib/security/java.security\n\n# Configure the JAVA_OPTIONS, you can add -XshowSettings:vm to also display the heap size.\nENV JAVA_OPTIONS=\"-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\n\nCOPY target/lib/* /deployments/lib/\nCOPY target/*-runner.jar /deployments/app.jar\n\nEXPOSE 8080\nUSER 1001\n\nENTRYPOINT [ \"/deployments/run-java.sh\" ]"
  },
  {
    "path": "apps/pizza-operator/src/main/docker/Dockerfile.native",
    "content": "####\n# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode\n#\n# Before building the docker image run:\n#\n# mvn package -Pnative -Dquarkus.native.container-build=true\n#\n# Then, build the image with:\n#\n# docker build -f src/main/docker/Dockerfile.native -t quarkus/pizza-operator .\n#\n# Then run the container using:\n#\n# docker run -i --rm -p 8080:8080 quarkus/pizza-operator\n#\n###\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.1\nWORKDIR /work/\nCOPY --chown=1001:root target/*-runner /work/application\n\nEXPOSE 8080\nUSER 1001\n\nCMD [\"./application\", \"-Dquarkus.http.host=0.0.0.0\"]"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/ExampleResource.java",
    "content": "package org.acme;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"/hello\")\npublic class ExampleResource {\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    public String hello() {\n        return \"hello\";\n    }\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/KubernetesClientProducer.java",
    "content": "package org.acme;\n\nimport io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinition;\nimport io.fabric8.kubernetes.client.DefaultKubernetesClient;\nimport io.fabric8.kubernetes.client.KubernetesClient;\nimport io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;\nimport io.fabric8.kubernetes.client.dsl.Resource;\nimport io.fabric8.kubernetes.internal.KubernetesDeserializer;\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\nimport javax.enterprise.inject.Produces;\nimport javax.inject.Named;\nimport javax.inject.Singleton;\n\npublic class KubernetesClientProducer {\n\n    @Produces\n    @Singleton\n    @Named(\"namespace\")\n    String findMyCurrentNamespace() throws IOException {\n        return new\n            String(Files.readAllBytes(Paths.get(\"/var/run/secrets/kubernetes.io/serviceaccount/namespace\")));\n    }\n\n    @Produces\n    @Singleton\n    KubernetesClient makeDefaultClient(@Named(\"namespace\") String namespace) {\n        return new DefaultKubernetesClient().inNamespace(namespace);\n    }\n\n    @Produces\n    @Singleton\n    NonNamespaceOperation<PizzaResource, PizzaResourceList, PizzaResourceDoneable, Resource<PizzaResource, PizzaResourceDoneable>>\n    makeCustomHelloResourceClient(KubernetesClient defaultClient, @Named(\"namespace\") String namespace) {\n\n        KubernetesDeserializer.registerCustomKind(\"mykubernetes.acme.org/v1beta2\", \"Pizza\", PizzaResource.class);\n\n        CustomResourceDefinition crd = defaultClient.customResourceDefinitions()\n        .list()\n        .getItems()\n        .stream()\n        .filter(d -> \"pizzas.mykubernetes.acme.org\".equals(d.getMetadata().getName()))\n        .findAny()\n            .orElseThrow(() -> new RuntimeException(\"Deployment error: Custom resource definition mykubernetes.acme.org/v1beta2 not found.\"));\n            \n        return defaultClient.customResources(crd, PizzaResource.class, PizzaResourceList.class, 
PizzaResourceDoneable.class).inNamespace(namespace);\n\n    }\n\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/PizzaResource.java",
    "content": "package org.acme;\n\nimport com.fasterxml.jackson.databind.annotation.JsonDeserialize;\nimport io.fabric8.kubernetes.client.CustomResource;\n\n@JsonDeserialize\npublic class PizzaResource extends CustomResource {\n\n    private PizzaResourceSpec spec;\n    private PizzaResourceStatus status;\n    // getters/setters\n\n    public PizzaResourceSpec getSpec() {\n        return spec;\n    }\n\n    public void setSpec(PizzaResourceSpec spec) {\n        this.spec = spec;\n    }\n\n    public PizzaResourceStatus getStatus() {\n        return status;\n    }\n\n    public void setStatus(PizzaResourceStatus status) {\n        this.status = status;\n    }\n\n    @Override\n    public String toString() {\n        String name = getMetadata() != null ? getMetadata().getName() : \"unknown\";\n        String version = getMetadata() != null ? getMetadata().getResourceVersion() : \"unknown\";\n        return \"name=\" + name + \" version=\" + version + \" value=\" + spec;\n    }\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/PizzaResourceDoneable.java",
    "content": "package org.acme;\n\nimport io.fabric8.kubernetes.api.builder.Function;\nimport io.fabric8.kubernetes.client.CustomResourceDoneable;\n\npublic class PizzaResourceDoneable extends CustomResourceDoneable<PizzaResource> {\n\n    public PizzaResourceDoneable(PizzaResource resource, Function<PizzaResource, PizzaResource> function) {\n        super(resource, function);\n    }\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/PizzaResourceList.java",
    "content": "package org.acme;\n\nimport com.fasterxml.jackson.databind.annotation.JsonSerialize;\nimport io.fabric8.kubernetes.client.CustomResourceList;\n\n@JsonSerialize\npublic class PizzaResourceList extends CustomResourceList<PizzaResource> {\n\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/PizzaResourceSpec.java",
    "content": "package org.acme;\n\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.databind.annotation.JsonDeserialize;\nimport io.quarkus.runtime.annotations.RegisterForReflection;\nimport java.util.ArrayList;\nimport java.util.List;\n\n@JsonDeserialize\n@RegisterForReflection\npublic class PizzaResourceSpec {\n\n    @JsonProperty(\"toppings\")\n    private List<String> toppings = new ArrayList<>();\n    @JsonProperty(\"sauce\")\n    private String sauce;\n    // getters/setters\n\n    public List<String> getToppings() {\n        return toppings;\n    }\n\n    public void setToppings(List<String> toppings) {\n        this.toppings = toppings;\n    }\n\n    public String getSauce() {\n        return sauce;\n    }\n\n    public void setSauce(String sauce) {\n        this.sauce = sauce;\n    }\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/PizzaResourceStatus.java",
    "content": "package org.acme;\n\nimport com.fasterxml.jackson.databind.annotation.JsonDeserialize;\n\n@JsonDeserialize\npublic class PizzaResourceStatus {\n\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/java/org/acme/PizzaResourceWatcher.java",
    "content": "package org.acme;\n\nimport io.fabric8.kubernetes.api.model.ContainerBuilder;\nimport io.fabric8.kubernetes.api.model.ObjectMetaBuilder;\nimport io.fabric8.kubernetes.api.model.Pod;\nimport io.fabric8.kubernetes.api.model.PodBuilder;\nimport io.fabric8.kubernetes.api.model.PodSpecBuilder;\nimport io.fabric8.kubernetes.client.KubernetesClient;\nimport io.fabric8.kubernetes.client.KubernetesClientException;\nimport io.fabric8.kubernetes.client.Watcher;\nimport io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;\nimport io.fabric8.kubernetes.client.dsl.Resource;\nimport io.quarkus.runtime.StartupEvent;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport javax.enterprise.event.Observes;\nimport javax.inject.Inject;\n\npublic class PizzaResourceWatcher {\n\n    @Inject\n    KubernetesClient defaultClient;\n\n    @Inject\n    NonNamespaceOperation<PizzaResource, PizzaResourceList, PizzaResourceDoneable, Resource<PizzaResource, PizzaResourceDoneable>> crClient;\n\n    void onStartup(@Observes StartupEvent event) {\n        System.out.println(\"Startup\");\n        crClient.watch(new Watcher<PizzaResource>() { //<.>\n            @Override\n            public void eventReceived(Action action, PizzaResource resource) {\n                System.out.println(\"Event \" + action.name());\n                if (action == Action.ADDED) {\n                    final String app = resource.getMetadata().getName();\n                    final String sauce = resource.getSpec().getSauce();\n                    final List<String> toppings = resource.getSpec().getToppings();\n                    final Map<String, String> labels = new HashMap<>();\n                    labels.put(\"app\", app);\n                    final ObjectMetaBuilder objectMetaBuilder = new ObjectMetaBuilder().withName(app + \"-pod\")\n                            .withNamespace(resource.getMetadata().getNamespace()).withLabels(labels);\n                    final 
ContainerBuilder containerBuilder = new ContainerBuilder().withName(\"pizza-maker\")\n                            .withImage(\"quay.io/lordofthejars/pizza-maker:1.0.0\").withCommand(\"/work/application\")\n                            .withArgs(\"--sauce=\" + sauce, \"--toppings=\" + String.join(\",\", toppings));\n                    final PodSpecBuilder podSpecBuilder = new PodSpecBuilder().withContainers(containerBuilder.build())\n                            .withRestartPolicy(\"Never\");\n                    final PodBuilder podBuilder = new PodBuilder().withMetadata(objectMetaBuilder.build())\n                            .withSpec(podSpecBuilder.build());\n                    final Pod pod = podBuilder.build();\n                    defaultClient.resource(pod).createOrReplace();\n\n                }\n            }\n\n            @Override\n            public void onClose(KubernetesClientException e) {\n            }\n        });\n    }\n\n}"
  },
  {
    "path": "apps/pizza-operator/src/main/resources/META-INF/resources/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <meta charset=\"UTF-8\">\n    <title>pizza-operator - 1.0.0-SNAPSHOT</title>\n    <style>\n        h1, h2, h3, h4, h5, h6 {\n            margin-bottom: 0.5rem;\n            font-weight: 400;\n            line-height: 1.5;\n        }\n\n        h1 {\n            font-size: 2.5rem;\n        }\n\n        h2 {\n            font-size: 2rem\n        }\n\n        h3 {\n            font-size: 1.75rem\n        }\n\n        h4 {\n            font-size: 1.5rem\n        }\n\n        h5 {\n            font-size: 1.25rem\n        }\n\n        h6 {\n            font-size: 1rem\n        }\n\n        .lead {\n            font-weight: 300;\n            font-size: 2rem;\n        }\n\n        .banner {\n            font-size: 2.7rem;\n            margin: 0;\n            padding: 2rem 1rem;\n            background-color: #00A1E2;\n            color: white;\n        }\n\n        body {\n            margin: 0;\n            font-family: -apple-system, system-ui, \"Segoe UI\", Roboto, \"Helvetica Neue\", Arial, sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\", \"Noto Color Emoji\";\n        }\n\n        code {\n            font-family: SFMono-Regular, Menlo, Monaco, Consolas, \"Liberation Mono\", \"Courier New\", monospace;\n            font-size: 87.5%;\n            color: #e83e8c;\n            word-break: break-word;\n        }\n\n        .left-column {\n            padding: .75rem;\n            max-width: 75%;\n            min-width: 55%;\n        }\n\n        .right-column {\n            padding: .75rem;\n            max-width: 25%;\n        }\n\n        .container {\n            display: flex;\n            width: 100%;\n        }\n\n        li {\n            margin: 0.75rem;\n        }\n\n        .right-section {\n            margin-left: 1rem;\n            padding-left: 0.5rem;\n        }\n\n        .right-section h3 {\n            padding-top: 0;\n            font-weight: 200;\n        }\n\n   
     .right-section ul {\n            border-left: 0.3rem solid #00A1E2;\n            list-style-type: none;\n            padding-left: 0;\n        }\n\n    </style>\n</head>\n<body>\n\n<div class=\"banner lead\">\n    Your new Cloud-Native application is ready!\n</div>\n\n<div class=\"container\">\n    <div class=\"left-column\">\n        <p class=\"lead\"> Congratulations, you have created a new Quarkus application.</p>\n\n        <h2>Why do you see this?</h2>\n\n        <p>This page is served by Quarkus. The source is in\n            <code>src/main/resources/META-INF/resources/index.html</code>.</p>\n\n        <h2>What can I do from here?</h2>\n\n        <p>If not already done, run the application in <em>dev mode</em> using: <code>mvn compile quarkus:dev</code>.\n        </p>\n        <ul>\n            <li>Add REST resources, Servlets, functions and other services in <code>src/main/java</code>.</li>\n            <li>Your static assets are located in <code>src/main/resources/META-INF/resources</code>.</li>\n            <li>Configure your application in <code>src/main/resources/application.properties</code>.\n            </li>\n        </ul>\n\n        <h2>Do you like Quarkus?</h2>\n        <p>Go give it a star on <a href=\"https://github.com/quarkusio/quarkus\">GitHub</a>.</p>\n\n        <h2>How do I get rid of this page?</h2>\n        <p>Just delete the <code>src/main/resources/META-INF/resources/index.html</code> file.</p>\n    </div>\n    <div class=\"right-column\">\n        <div class=\"right-section\">\n            <h3>Application</h3>\n            <ul>\n                <li>GroupId: org.acme</li>\n                <li>ArtifactId: pizza-operator</li>\n                <li>Version: 1.0.0-SNAPSHOT</li>\n                <li>Quarkus Version: 1.5.0.Final</li>\n            </ul>\n        </div>\n        <div class=\"right-section\">\n            <h3>Next steps</h3>\n            <ul>\n                <li><a href=\"https://quarkus.io/guides/maven-tooling.html\" 
target=\"_blank\">Setup your IDE</a></li>\n                <li><a href=\"https://quarkus.io/guides/getting-started.html\" target=\"_blank\">Getting started</a></li>\n                <li><a href=\"https://quarkus.io\" target=\"_blank\">Quarkus Web Site</a></li>\n            </ul>\n        </div>\n    </div>\n</div>\n\n\n</body>\n</html>"
  },
  {
    "path": "apps/pizza-operator/src/main/resources/application.properties",
    "content": ""
  },
  {
    "path": "apps/pizza-operator/src/test/java/org/acme/ExampleResourceTest.java",
    "content": "package org.acme;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.junit.jupiter.api.Test;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@QuarkusTest\npublic class ExampleResourceTest {\n\n    @Test\n    public void testHelloEndpoint() {\n        given()\n          .when().get(\"/hello\")\n          .then()\n             .statusCode(200)\n             .body(is(\"hello\"));\n    }\n\n}"
  },
  {
    "path": "apps/pizza-operator/src/test/java/org/acme/NativeExampleResourceIT.java",
    "content": "package org.acme;\n\nimport io.quarkus.test.junit.NativeImageTest;\n\n@NativeImageTest\npublic class NativeExampleResourceIT extends ExampleResourceTest {\n\n    // Execute the same tests but in native mode.\n}"
  },
  {
    "path": "apps/pizzas/cheese-pizza.yaml",
    "content": "apiVersion: mykubernetes.acme.org/v1\nkind: Pizza\nmetadata:\n  name: cheesep\nspec:\n  toppings:\n  - mozzarella\n  sauce: regular\n"
  },
  {
    "path": "apps/pizzas/meat-pizza.yaml",
    "content": "apiVersion: mykubernetes.acme.org/v1\nkind: Pizza\nmetadata:\n  name: meatsp\nspec:\n  toppings:\n  - mozzarella\n  - pepperoni\n  - sausage\n  - bacon\n  sauce: extra"
  },
  {
    "path": "apps/pizzas/pizza-crd.yaml",
    "content": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: pizzas.mykubernetes.acme.org\n  labels:\n    app: pizzamaker\n    mylabel: stuff\nspec:\n  group: mykubernetes.acme.org\n  scope: Namespaced\n  versions:\n  - name: v1\n    served: true\n    storage: true\n    schema:\n      openAPIV3Schema:\n        description: \"A custom resource for making yummy pizzas\" #<.>\n        type: object\n        properties:\n          spec:\n            type: object\n            description: \"Information about our pizza\"\n            properties:\n              toppings: #<.>\n                type: array\n                items:\n                  type: string\n                description: \"List of toppings for our pizza\"\n              sauce: #<.>\n                type: string\n                description: \"The name of the sauce to use on our pizza\"\n  names:\n    kind: Pizza #<.>\n    listKind: PizzaList\n    plural: pizzas\n    singular: pizza\n    shortNames:\n    - pz"
  },
  {
    "path": "apps/pizzas/pizza-deployment.yaml",
    "content": "apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: quarkus-operator-example\nrules:\n- apiGroups:\n  - ''\n  resources:\n  - pods\n  verbs:\n  - get\n  - list\n  - watch\n  - create\n  - update\n  - delete\n  - patch\n- apiGroups:\n  - apiextensions.k8s.io\n  resources:\n  - customresourcedefinitions\n  verbs:\n  - list\n- apiGroups:\n  - mykubernetes.acme.org\n  resources:\n  - pizzas\n  verbs:\n  - list\n  - watch\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: quarkus-operator-example\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: quarkus-operator-example\nsubjects:\n- kind: ServiceAccount\n  name: quarkus-operator-example\n  namespace: pizzahat\nroleRef:\n  kind: ClusterRole\n  name: quarkus-operator-example\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: quarkus-operator-example\nspec:\n  selector:\n    matchLabels:\n      app: quarkus-operator-example\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: quarkus-operator-example\n    spec:\n      serviceAccountName: quarkus-operator-example\n      containers:\n      - image: quay.io/rhdevelopers/pizza-operator:1.0.1\n        name: quarkus-operator-example\n        imagePullPolicy: IfNotPresent"
  },
  {
    "path": "apps/pizzas/veggie-lovers.yaml",
    "content": "apiVersion: mykubernetes.acme.org/v1\nkind: Pizza\nmetadata:\n  name: veggiep\nspec:\n  toppings:\n  - mozzarella\n  - black olives\n  sauce: extra\n"
  },
  {
    "path": "bin/build-site.sh",
    "content": "#!/bin/sh\ndocker run -u $(id -u) -v $PWD:/antora:Z --rm -t antora/antora:2.3.1 --cache-dir=./.cache/antora github-pages.yml\n"
  },
  {
    "path": "documentation/antora.yml",
    "content": "name: kubernetes-tutorial\nversion: v1.34\ndisplay_version: v1.34\nprerelease: false\nnav:\n  - modules/ROOT/nav.adoc\nstart_page: ROOT:index.adoc\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/PizzaResourceWatcher.java",
    "content": "package org.acme;\n\nimport io.fabric8.kubernetes.api.model.ContainerBuilder;\nimport io.fabric8.kubernetes.api.model.ObjectMetaBuilder;\nimport io.fabric8.kubernetes.api.model.Pod;\nimport io.fabric8.kubernetes.api.model.PodBuilder;\nimport io.fabric8.kubernetes.api.model.PodSpecBuilder;\nimport io.fabric8.kubernetes.client.KubernetesClient;\nimport io.fabric8.kubernetes.client.KubernetesClientException;\nimport io.fabric8.kubernetes.client.Watcher;\nimport io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;\nimport io.fabric8.kubernetes.client.dsl.Resource;\nimport io.quarkus.runtime.StartupEvent;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport javax.enterprise.event.Observes;\nimport javax.inject.Inject;\n\npublic class PizzaResourceWatcher {\n\n    @Inject\n    KubernetesClient defaultClient;\n\n    @Inject\n    NonNamespaceOperation<PizzaResource, PizzaResourceList, PizzaResourceDoneable, Resource<PizzaResource, PizzaResourceDoneable>> crClient;\n\n    void onStartup(@Observes StartupEvent event) {\n        System.out.println(\"Startup\");\n        crClient.watch(new Watcher<PizzaResource>() { //<.>\n            @Override\n            public void eventReceived(Action action, PizzaResource resource) {\n                System.out.println(\"Event \" + action.name());\n                if (action == Action.ADDED) {\n                    final String app = resource.getMetadata().getName();\n                    final String sauce = resource.getSpec().getSauce();\n                    final List<String> toppings = resource.getSpec().getToppings();\n                    final Map<String, String> labels = new HashMap<>();\n                    labels.put(\"app\", app);\n                    final ObjectMetaBuilder objectMetaBuilder = new ObjectMetaBuilder().withName(app + \"-pod\")\n                            .withNamespace(resource.getMetadata().getNamespace()).withLabels(labels);\n                    final 
ContainerBuilder containerBuilder = new ContainerBuilder().withName(\"pizza-maker\")\n                            .withImage(\"quay.io/lordofthejars/pizza-maker:1.0.0\").withCommand(\"/work/application\")\n                            .withArgs(\"--sauce=\" + sauce, \"--toppings=\" + String.join(\",\", toppings));\n                    final PodSpecBuilder podSpecBuilder = new PodSpecBuilder().withContainers(containerBuilder.build())\n                            .withRestartPolicy(\"Never\");\n                    final PodBuilder podBuilder = new PodBuilder().withMetadata(objectMetaBuilder.build())\n                            .withSpec(podSpecBuilder.build());\n                    final Pod pod = podBuilder.build();\n                    defaultClient.resource(pod).createOrReplace();\n\n                }\n            }\n\n            @Override\n            public void onClose(KubernetesClientException e) {\n            }\n        });\n    }\n\n}"
  },
  {
    "path": "documentation/modules/ROOT/examples/cheese-pizza.yaml",
    "content": "apiVersion: mykubernetes.acme.org/v1\nkind: Pizza\nmetadata:\n  name: cheesep\nspec:\n  toppings:\n  - mozzarella\n  sauce: regular\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/meat-pizza.yaml",
    "content": "apiVersion: mykubernetes.acme.org/v1\nkind: Pizza\nmetadata:\n  name: meatsp\nspec:\n  toppings:\n  - mozzarella\n  - pepperoni\n  - sausage\n  - bacon\n  sauce: extra"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-deployment-configuration-secret.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: myboot\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        ports:\n          - containerPort: 8080\n        volumeMounts:          \n          - name: mysecretvolume #<.>\n            mountPath: /mystuff/secretstuff\n            readOnly: true\n        resources:\n          requests: \n            memory: \"300Mi\" \n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n      volumes:\n        - name: mysecretvolume #<.>\n          secret:\n            secretName: mysecret\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-deployment-live-ready-aggressive.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n        env: dev\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n        resources:\n          requests:\n            memory: \"300Mi\"\n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /alive\n          periodSeconds: 2\n          timeoutSeconds: 2\n          failureThreshold: 2\n        readinessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n          periodSeconds: 3"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-deployment-live-ready.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n        env: dev\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n        resources:\n          requests:\n            memory: \"300Mi\"\n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /alive\n          initialDelaySeconds: 10\n          periodSeconds: 5\n          timeoutSeconds: 2\n        readinessProbe:\n          httpGet:  \n            path: /health\n            port: 8080\n          initialDelaySeconds: 10\n          periodSeconds: 3\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-deployment-startup-live-ready.yml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myboot\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myboot\n  template:\n    metadata:\n      labels:\n        app: myboot\n        env: dev\n    spec:\n      containers:\n      - name: myboot\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n        resources:\n          requests:\n            memory: \"300Mi\"\n            cpu: \"250m\" # 1/4 core\n          limits:\n            memory: \"400Mi\"\n            cpu: \"1000m\" # 1 core\n        livenessProbe:\n          httpGet:\n              port: 8080\n              path: /alive\n          periodSeconds: 2\n          timeoutSeconds: 2\n          failureThreshold: 2\n        readinessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n          periodSeconds: 3\n        startupProbe:\n          httpGet:\n            path: /alive\n            port: 8080\n          failureThreshold: 6\n          periodSeconds: 5\n          timeoutSeconds: 1"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-pod-volume-hostpath.yaml",
    "content": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo\n    image: quay.io/rhdevelopers/myboot:v4\n    \n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  volumes:\n  - name: demo-volume\n    hostPath: #<.> \n      path: \"/mnt/data\" #<.>\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-pod-volume.yml",
    "content": "apiVersion: v1\nkind: Pod #<.>\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo\n    image: quay.io/rhdevelopers/myboot:v4\n    \n    volumeMounts:\n    - mountPath: /tmp/demo #<.>\n      name: demo-volume #<.> \n\n  volumes:\n  - name: demo-volume\n    emptyDir: {}\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/myboot-pods-volume.yml",
    "content": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo-1 #<.>\n    image: quay.io/rhdevelopers/myboot:v4\n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  - name: myboot-demo-2 #<.>\n    image: quay.io/rhdevelopers/myboot:v4 #<.>\n\n    env:\n    - name: SERVER_PORT #<.>\n      value: \"8090\"\n\n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  volumes:\n  - name: demo-volume #<.>\n    emptyDir: {}\n"
  },
  {
    "path": "documentation/modules/ROOT/examples/pizza-crd.yaml",
    "content": "apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: pizzas.mykubernetes.acme.org\n  labels:\n    app: pizzamaker\n    mylabel: stuff\nspec:\n  group: mykubernetes.acme.org\n  scope: Namespaced\n  versions:\n  - name: v1\n    served: true\n    storage: true\n    schema:\n      openAPIV3Schema:\n        description: \"A custom resource for making yummy pizzas\" #<.>\n        type: object\n        properties:\n          spec:\n            type: object\n            description: \"Information about our pizza\"\n            properties:\n              toppings: #<.>\n                type: array\n                items:\n                  type: string\n                description: \"List of toppings for our pizza\"\n              sauce: #<.>\n                type: string\n                description: \"The name of the sauce to use on our pizza\"\n  names:\n    kind: Pizza #<.>\n    listKind: PizzaList\n    plural: pizzas\n    singular: pizza\n    shortNames:\n    - pz"
  },
  {
    "path": "documentation/modules/ROOT/examples/quarkus-statefulset-external-svc.yaml",
    "content": "apiVersion: v1\nkind: Service\nmetadata:\n  name: quarkus-statefulset-2\nspec:\n  type: LoadBalancer #<.>\n  externalTrafficPolicy: Local #<.>\n  selector:\n    statefulset.kubernetes.io/pod-name: quarkus-statefulset-2 #<.>\n  ports:\n  - port: 8080\n    name: web"
  },
  {
    "path": "documentation/modules/ROOT/examples/whalesay-cronjob.yaml",
    "content": "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: whale-say-cronjob\nspec:\n  schedule: \"* * * * *\" #<.>\n  jobTemplate:                   \n    spec:                        \n      template:    \n        metadata:\n          labels:\n            job-type: whale-say #<.>              \n        spec:\n          containers:\n          - name: whale-say-container\n            image: docker/whalesay\n            command: [\"cowsay\",\"Hello DevNation\"]\n          restartPolicy: Never"
  },
  {
    "path": "documentation/modules/ROOT/examples/whalesay-job.yaml",
    "content": "apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: whale-say-job #<.>\nspec:\n  template:\n    spec:\n      containers:\n      - name: whale-say-container\n        image: docker/whalesay\n        command: [\"cowsay\",\"Hello DevNation\"]\n      restartPolicy: Never"
  },
  {
    "path": "documentation/modules/ROOT/nav.adoc",
    "content": "* 1. Requirements\n** xref:installation.adoc[Installation]\n*** xref:installation.adoc#tutorial-all-local[CLI]\n*** xref:installation.adoc#install-minikube[Install Minikube]\n*** xref:installation.adoc#start-kubernetes[Start Kubernetes]\n\n* 2. Beginner\n** xref:kubectl.adoc[kubectl]\n** xref:pod-rs-deployment.adoc[Pod, ReplicaSet, Deployment]\n** xref:service.adoc[Service]\n** xref:logs.adoc[Logs]\n** xref:service-magic.adoc[Service Magic]\n** xref:blue-green.adoc[Blue/Green Deployments]\n\n* 3. Elementary\n** xref:building-images.adoc[Building Images]\n** xref:resources.adoc[Resources and Limits]\n** xref:rolling-updates.adoc[Rolling updates]\n** xref:live-ready.adoc[Liveness, Readiness & Startup]\n** xref:configmap.adoc[ConfigMap]\n\n* 4. Intermediate\n** xref:secrets.adoc[Secrets]\n** xref:crds.adoc[Operators]\n** xref:volumes-persistentvolumes.adoc[Volumes]\n** xref:taints-affinity.adoc[Taints & Affinity]\n** xref:jobs-cronjobs.adoc[Jobs & CronJobs]\n** xref:daemonset.adoc[DaemonSet]\n** xref:statefulset.adoc[StatefulSet]\n\n* 5. Advanced\n** xref:ingress.adoc[Ingress]"
  },
  {
    "path": "documentation/modules/ROOT/pages/_attributes.adoc",
    "content": ":moduledir: ..\n:branch: master\n:github-repo: https://github.com/redhat-scholars/kubernetes-tutorial\n:openshift-version: 4.3\n:vm-driver: virtualbox\n:profile: devnation\n:curl-loop-sleep-time: .3"
  },
  {
    "path": "documentation/modules/ROOT/pages/_partials/affinity_label.adoc",
    "content": "// tag::openshift[]\n:chosen-node: ip-10-0-175-64.eu-central-1.compute.internal\n// end::openshift[]\n// tag::minikube[]\n:chosen-node: devnation-m02\n// end::minikube[]\n\nGet a list of nodes:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get nodes\n----\n\n[.console-output]\n[source,bash,subs=\"+attributes,+quotes\"]\n----\nNAME                                            STATUS   ROLES    AGE   VERSION\n# tag::openshift[]\nip-10-0-136-107.eu-central-1.compute.internal   Ready    master   26h   v1.16.2\nip-10-0-140-186.eu-central-1.compute.internal   Ready    worker   26h   v1.16.2\nip-10-0-141-128.eu-central-1.compute.internal   Ready    worker   25h   v1.16.2\nip-10-0-146-109.eu-central-1.compute.internal   Ready    worker   25h   v1.16.2\nip-10-0-150-226.eu-central-1.compute.internal   Ready    worker   26h   v1.16.2\nip-10-0-155-122.eu-central-1.compute.internal   Ready    master   26h   v1.16.2\nip-10-0-162-206.eu-central-1.compute.internal   Ready    worker   26h   v1.16.2\nip-10-0-168-102.eu-central-1.compute.internal   Ready    master   26h   v1.16.2\n#{chosen-node}#    Ready    worker   25h   v1.16.2\n# end::openshift[]\n# tag::minikube[]\ndevnation       Ready    control-plane,master   3d    v1.21.2\n#{chosen-node}#   Ready    <none>                 42h   v1.21.2\n# end::minikube[]\n----\n\nThen pick a node in the list to label (such as the one highlighted)\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nkubectl label nodes {chosen-node} #color=blue# #<.>\n----\n<.> Notice that this matches the affinity in the pod\n\n[.console-output]\n[source,bash,subs=\"+attributes\"]\n----\nnode/{chosen-node} labeled\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/_partials/find_node_for_pod.adoc",
    "content": "[.console-input]\n[source,bash,subs=\"+macros\"]\n----\nNODE=$(kubectl get pod -o jsonpath='{.items[0].spec.nodeName}') #<.>\necho ${NODE}\n\n----\n<.> the `.items[0]` is because we're asking for all pods, but we know our list will contain only one element"
  },
  {
    "path": "documentation/modules/ROOT/pages/_partials/invoke-service.adoc",
    "content": "[k8s-env='']\n[k8s-cli='']\n[doc-sec='']\n\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/_partials/set-env-vars.adoc",
    "content": ".Environment Variables\n\n[cols=\"4*^,4*.\"]\n|===\n|**Variable** |**Description** |**Default Value** | **e.g.**\n\n|REGISTRY_USERNAME\n|The Container Registry User Id that will be used to authenticate against the container registry `$REGISTRY_URL`\n|\n|demo\n\n|REGISTRY_PASSWORD\n|The Container Registry User Password that will be used to authenticate against the container registry `$REGISTRY_URL`\n|\n|demopassword\n\n|REGISTRY_URL\n|The Container Registry URL, defaults to https://index.docker.io\n|https://index.docker.io\n|https://quay.io/v2\n\n|DESTINATION_IMAGE_NAME\n|The fully qualified image name that will be built\n|\n| quay.io/foo/bar:v1.0\n|==="
  },
  {
    "path": "documentation/modules/ROOT/pages/_partials/verify-setup.adoc",
    "content": "\nThe following checks ensure that each chapter exercises are done with the right environment settings.\n\n[#minikube-config-view]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nminikube config view\n----\n\nThe command should return an output as shown:\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\n- profile: devnation\n- vm-driver: virtualbox\n- cpus: 2\n- kubernetes-version: {kubernetes-version}\n- memory: 6144\n----\n\n[#k8s-cluster-info]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl cluster-info\n----\n\nThe command should return an output as shown:\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nKubernetes master is running at https://192.168.99.100:8443\nKubeDNS is running at https://192.168.99.100:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\n----\n\n[NOTE]\n====\nTo further debug and diagnose cluster problems, use `kubectl cluster-info dump`.\n====\n\n* Set your local docker to use the minikube docker daemon\n\n[#minikube-set-env]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\neval $(minikube docker-env)\n----\n\n* Kubernetes should be {kubernetes-version}\n\n[#kubectl-version]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl version\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/_partials/watching-logs.adoc",
    "content": "[kube-ns='kubernetestutorial']\n[kube-svc='']\n\nSince a Cron job source is used in this section of the tutorial, it would emit events every minute. We can watch the logs of the service to see the messages delivered.\n\nThe logs could be watched using the command:\n[tabs]\n====\nkubectl::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl logs -n {kube-ns} -f <pod-name> -c user-container\n----\n--\noc::\n+\n--\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\noc logs -n {kube-ns} -f <pod-name> -c user-container\n----\n--\n====\n\n[TIP]\n====\n* Using stern with the command `stern  -n {kube-ns} {kube-svc}`, to filter the logs further add `-c user-container` to the stern command.\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nstern -n {kube-ns} -c user-container {kube-svc} \n----\n====\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/blue-green.adoc",
    "content": "= Blue/Green\n\nhttps://martinfowler.com/bliki/BlueGreenDeployment.html[Here] you can find a description and history of Blue/Green Deployment.\n\nMake sure you are in the correct namespace\n\n:section-k8s: bluegreen\n:set-namespace: myspace\n\ninclude::partial$set-context.adoc[]\n\nMake sure nothing else is deployed:\n\n[#no-resources-blue-green]\n[.console-input]\n[source, bash]\n----\nkubectl get all\n----\n\n[.console-output]\n[source,bash]\n----\nNo resources found in myspace namespace.\n----\n\nDeploy V1 of `myboot`:\n\n[#deploy-v1-blue-green]\n[.console-input]\n[source, bash]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-resources-limits.yml\n----\n\nScale to 2 replicas:\n\n[#scale-v1-blue-green]\n[.console-input]\n[source, bash]\n----\nkubectl scale deployment/myboot --replicas=2\n----\n\nWatch and `show-labels`:\n\n[#labels-v1-blue-green]\n[.console-input]\n[source, bash]\n----\nkubectl get pods -w --show-labels\n----\n\nDeploy the service:\n\n[#deploy-service-blue-green]\n[.console-input]\n[source, bash]\n----\nkubectl apply -f apps/kubefiles/myboot-service.yml\n----\n\n:section-k8s: bluegreen\n:service-exposed: myboot\ninclude::partial$env-curl.adoc[]\n\nAnd run loop script:\n\ninclude::partial$loop.adoc[]\n\nDeploy V2 of `myboot`:\n\n[#deploy-v2-blue-green]\n[.console-input]\n[source, bash]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-resources-limits-v2.yml\n----\n\nVerify that the new pod/deployment carries the new code:\n\n[#exec-v2-blue-green]\n[.console-input]\n[source, bash]\n----\nPODNAME=$(kubectl get pod -l app=myboot-next -o name)\nkubectl exec -it $PODNAME -- curl localhost:8080\n----\n\n[.console-output]\n[source,bash]\n----\nJambo from Spring Boot! 
1 on myboot-next-66b68c6659-ftcjr\n----\n\nNow update the single Service to point to the new pod and go GREEN:\n\n[#patch-service-green]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl patch svc/myboot -p '{\"spec\":{\"selector\":{\"app\":\"myboot-next\"}}}'\n----\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 240 on myboot-d78fb6d58-929wn\nJambo from Spring Boot! 2 on myboot-next-66b68c6659-ftcjr\nJambo from Spring Boot! 3 on myboot-next-66b68c6659-ftcjr\nJambo from Spring Boot! 4 on myboot-next-66b68c6659-ftcjr\n----\n\nDetermine that you prefer Hawaiian (blue) to French (green) and fallback:\n\nNow update the single Service to point to the new pod and go BLUE:\n\n[#patch-service-blue]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl patch svc/myboot -p '{\"spec\":{\"selector\":{\"app\":\"myboot\"}}}'\n----\n\n[.console-output]\n[source,bash]\n----\nJambo from Spring Boot! 17 on myboot-next-66b68c6659-ftcjr\nAloha from Spring Boot! 257 on myboot-d78fb6d58-vqvlb\nAloha from Spring Boot! 258 on myboot-d78fb6d58-vqvlb\n----\n\n== Clean Up\n\n[#clean]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete service myboot\nkubectl delete deployment myboot\nkubectl delete deployment myboot-next\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/building-images.adoc",
    "content": "= Building Images\n\n// See antora yaml (such as github-pages.yml) to change what attribute docker-host is set to\n\n== Prerequisite\n\nIn this section, we are assuming you are running Docker in your local machine (either using Docker Tools or native Docker).\n\nIMPORTANT: To make it work correctly, you need to run this section in a new terminal window to avoid using the Kubernetes (`minikube`) environment used in previous sections.\n\n== Build your application artifact\n\nFirst let's take a quick look at the application we're looking to build\n\n:quick-open-file: MyRESTController.java\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n.MyRESTController.java\nimage::hello-world-app.png[]\n\nCompile, build and test the Spring Boot Java project:\n\n[#build-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncd apps/helloworld/springboot\nmvn clean package\njava -jar target/boot-demo-1.0.0.jar\n----\n\nThen `curl` it in a separate terminal:\n\n[.console-input]\n[source, bash]\n----\ncurl localhost:8080\n----\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 1 on unknown\n----\n\n`unknown` because the environment variable is not currently set, it will be inside of a Docker container and inside of Kubernetes.\n\n== Build container image\n\nNOTE: Change `quay.io` for your registry (e.g. `docker.io`) and `{myrepo}` to your organization.  
This next step does assume you have a working installation of Docker for Mac/Windows/Linux.\n\n[#build-container--building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ndocker build -t quay.io/{myrepo}/myapp:v1 .\n----\n\nResults:\n\n[.console-output]\n[source,bash, subs=\"+attributes\"]\n----\nSending build context to Docker daemon  14.47MB\nStep 1/6 : FROM openjdk:8u151\n ---> a30a1e547e6d\nStep 2/6 : ENV JAVA_APP_JAR boot-demo-1.0.0.jar\n ---> Using cache\n ---> 62b714308856\nStep 3/6 : WORKDIR /app/\n ---> Using cache\n ---> aefc5bf44b15\nStep 4/6 : COPY target/$JAVA_APP_JAR .\n ---> f881c5f5815b\nStep 5/6 : EXPOSE 8080\n ---> Running in 4e9adc135345\nRemoving intermediate container 4e9adc135345\n ---> 2909459c83f6\nStep 6/6 : CMD java $JAVA_OPTIONS -jar $JAVA_APP_JAR\n ---> Running in 46bcab555de7\nRemoving intermediate container 46bcab555de7\n ---> 85b78b9b70b1\nSuccessfully built 85b78b9b70b1\nSuccessfully tagged quay.io/{myrepo}/myapp:v1\n----\n\n== Run the container image\n\nRun and Test your newly created Docker container:\n\n[#run-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ndocker run --rm -it -p 8080:8080 --name myapp quay.io/{myrepo}/myapp:v1\n----\n\n[#curl-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080\n----\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 
1 on 76851270a3e7\n----\n\n[#curl-sys-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080/sysresources\n----\n\n[.console-output]\n[source,bash]\n----\nMemory: 1268 Cores: 3\n----\n\nThese numbers are based on the memory and CPUs allocated to the Docker daemon as seen in the image below:\n\n.Docker settings\nimage::docker-settings.png[Docker Settings]\n\n[#curl-consume-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080/consume\n----\n\n[.console-output]\n[source,bash]\n----\nAllocated about 80% (1.2 GiB) of the max allowed JVM memory size (1.2 GiB)\n----\n\nStop & remove the Docker container:\n\n----\ncontrol-c\n----\n\n== Run your container with constrained resources\n\nNow, constrain the resources associated with this Linux container\n\n[#run-container-constrained-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ndocker run --rm -it -p 8080:8080 -m 400m --cpus=\"1\" --name myapp quay.io/{myrepo}/myapp:v1\n----\n\nAsk for the container's resources:\n\n[#curl-sys-constrained-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080/sysresources\n----\n\n[.console-output]\n[source,bash]\n----\nMemory: 1268 Cores: 3\n----\n\nCrash it:\n\n[#curl-consume-crash-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080/consume\n----\n\n== Fix memory problems\n\nTo correct this behavior use a different Dockerfile:\n\n[#build-mem-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ndocker build -t quay.io/{myrepo}/myapp:v1 -f Dockerfile_Memory .\n----\n\nNow docker run it:\n\n[#run-sys-constrained-fix-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ndocker run --rm -it -p 8080:8080 -m 400m --cpus=\"1\" --name myapp 
quay.io/{myrepo}/myapp:v1\n----\n\nAnd `curl` it:\n\n[#curl-sys-constrained-fix-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080/sysresources\n----\n\n[.console-output]\n[source,bash]\n----\nMemory: 112 Cores: 3\n----\n\nAnd try to crash it:\n\n[#curl-consume-fix-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ncurl {docker-host}:8080/consume\n----\n\n[.console-output]\n[source,bash]\n----\nAllocated about 80% (98.0 MiB) of the max allowed JVM memory size (112.0 MiB)\n----\n\nOnce you are happy with your container image, push it up to your favorite registry:\n\n[#push-container-building-images]\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\ndocker login quay.io\ndocker push quay.io/{myrepo}/myapp:v1\n----\n\n[.console-output]\n[source,bash]\n----\n.\n.\n.\n20c527f217db: Pushed\n61c06e07759a: Pushed\nbcbe43405751: Pushed\ne1df5dc88d2c: Pushed\nv1: digest: sha256:d22d4af6e297a024b061dbaae05be76c771fdb1db51643dc2dd8b8e047f79647 size: 2630\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/configmap.adoc",
    "content": "= ConfigMap\n\nConfigMap is the Kubernetes resource that allows you to externalize your application's configuration.\n\n*_An app’s config is everything that is likely to vary between deploys (staging, production, developer environments, etc)._*\n\nhttps://12factor.net/config[12 Factor Apps]\n\n== Environment Variables\n\nMyRESTController.java includes a small chunk of code that looks to the environment\n\n[source,java]\n----\n   @RequestMapping(\"/configure\")\n   public String configure() {\n        String databaseConn = environment.getProperty(\"DBCONN\",\"Default\");\n        String msgBroker = environment.getProperty(\"MSGBROKER\",\"Default\");\n        String hello = environment.getProperty(\"GREETING\",\"Default\");\n        String love = environment.getProperty(\"LOVE\",\"Default\");\n        return \"Configuration: \\n\"\n            + \"databaseConn=\" + databaseConn + \"\\n\"\n            + \"msgBroker=\" + msgBroker + \"\\n\"\n            + \"hello=\" + hello + \"\\n\"\n            + \"love=\" + love + \"\\n\";\n   }\n----\n\nEnvironment variables can be manipulated at the Deployment level. 
\nChanges cause Pod redeployment.\n\nDeploy `myboot`:\n\n[#deploy-myboot-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment.yml\n----\n\nDeploy `myboot` Service:\n\n[#deploy-myboot-service-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-service.yml\n----\n\nAnd watch the pods status:\n\n:section-k8s: configmap\ninclude::partial$watching-pods.adoc[]\n\nAsk the application for its configuration:\n\n:section-k8s: configmaps\n:service-exposed: myboot\ninclude::partial$env-curl.adoc[]\n\n[#get-config-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl $IP:$PORT/configure\n----\n\n[.console-output]\n[source,bash]\n----\nConfiguration for : myboot-66d7d57687-jsbz7\ndatabaseConn=Default\nmsgBroker=Default\ngreeting=Default\nlove=Default\n----\n\n== Set Environment Variables\n\n[#set-env-vars]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl set env deployment/myboot GREETING=\"namaste\" \\\n  LOVE=\"Aloha\" \\\n  DBCONN=\"jdbc:sqlserver://45.91.12.123:1443;user=MyUserName;password=*****;\"\n----\n\nWatch the pods being reborn:\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS        RESTARTS   AGE\nmyboot-66d7d57687-jsbz7   1/1     Terminating   0          5m\nmyboot-785ff6bddc-ghwpc   1/1     Running       0          13s\n----\n\n[#get-config2-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl $IP:$PORT/configure\n----\n\n[.console-output]\n[source,bash]\n----\nConfiguration for : myboot-5fd9dd9c59-58xbh\ndatabaseConn=jdbc:sqlserver://45.91.12.123:1443;user=MyUserName;password=*****;\nmsgBroker=Default\ngreeting=namaste\nlove=Aloha\n----\n\nDescribe the deployment:\n\n:section-k8s: configmaps\n:describe-deployment-name: 
myboot\n\ninclude::partial$describe-deployment.adoc[]\n\n[.console-output]\n[source,bash]\n----\n...\n  Containers:\n   myboot:\n    Image:      quay.io/burrsutter/myboot:v1\n    Port:       8080/TCP\n    Host Port:  0/TCP\n    Environment:\n      GREETING:  namaste\n      LOVE:      Aloha\n      DBCONN:    jdbc:sqlserver://45.91.12.123:1443;user=MyUserName;password=*****;\n    Mounts:      <none>\n  Volumes:       <none>\n...  \n----\n\nRemove environment variables:\n\n[#remove-env-vars-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl set env deployment/myboot GREETING- \\\n  LOVE- \\\n  DBCONN-\n----\n\nAnd verify that they have been removed:\n\n[#get-config3-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl $IP:$PORT/configure\n----\n\n[.console-output]\n[source,bash]\n----\nConfiguration for : myboot-66d7d57687-xkgw6\ndatabaseConn=Default\nmsgBroker=Default\ngreeting=Default\nlove=Default\n----\n\n==  Create a ConfigMap\n\n[#create-configmap-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create cm my-config --from-env-file=apps/config/some.properties\n----\n\n[#get-configmap-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get cm\nkubectl get cm my-config\nkubectl get cm my-config -o json\n----\n\n[.console-output]\n[source,bash]\n----\n...\n    \"data\": {\n        \"GREETING\": \"jambo\",\n        \"LOVE\": \"Amour\"\n    },\n    \"kind\": \"ConfigMap\",\n...    
\n----\n\nOr you can describe the `ConfigMap` object:\n\n[#describe-configmap-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe cm my-config\n----\n\n[.console-output]\n[source,bash]\n----\nName:         my-config\nNamespace:    myspace\nLabels:       <none>\nAnnotations:  <none>\n\nData\n====\nGREETING:\n====\njambo\nLOVE:\n====\nAmour\nEvents:  <none>\n----\n\n.Using `kubectl edit` to view resources\n****\nFor large files you might find using `kubectl edit` is more convenient for viewing resources on the cluster.  In our case, we can view the config map by running the following (and aborting any changes!):\n\ninclude::partial$tip_vscode_kube_editor.adoc[]\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl edit cm my-config\n----\n****\n\nNow deploy the app with its request for the `ConfigMap`:\n\n[#deploy-myboot-configmap-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-configuration.yml\n----\n\nAnd get its configure endpoint:\n\n[#get-config4-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl $IP:$PORT/configure\n----\n\n[.console-output]\n[source,bash]\n----\nConfiguration for : myboot-84bfcff474-x6xnt\ndatabaseConn=Default\nmsgBroker=Default\ngreeting=jambo\nlove=Amour\n----\n\nAnd switch to the other properties file by recreating the `ConfigMap`:\n\n[#delete-pod-configmap-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete cm my-config\nkubectl create cm my-config --from-env-file=apps/config/other.properties\nkubectl delete pod -l app=myboot --wait=false\n----\n\n[#get-config5-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl $IP:$PORT/configure\n----\n\n[.console-output]\n[source,bash]\n----\nConfiguration for : 
myboot-694954fc6d-nzdvx\ndatabaseConn=jdbc:sqlserver://123.123.123.123:1443;user=MyUserName;password=*****;\nmsgBroker=tcp://localhost:61616?jms.useAsyncSend=true\nhello=Default\nlove=Default\n----\n\nThere are a lot more ways to have fun with ConfigMaps. The core documentation has you manipulate a Pod specification instead of a Deployment, but the results are basically the same:\nhttps://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap\n\n\n== Clean Up\n\n[#clean-configmaps]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete deployment myboot\nkubectl delete cm my-config\nkubectl delete service myboot\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/crds.adoc",
    "content": "= Operators\ninclude::_attributes.adoc[]\n:watch-terminal: Terminal 2\n:log-terminal: Terminal 3\n:section-namespace: pizzahat\n\nOperators are a way of extending the functionality of our Kubernetes cluster by installing automated controllers to manage extensions we provide to the underlying Kubernetes API.  \n\nIn this section we'll take a deeper look at how operators interact with the Kubernetes API to do this\n\n.Operators in the Real World\n****\nWhen demonstrating this tutorial in a master class, it can be good to show the Kafka Operator in Openshift (as roughly outlined <<Kafka for OpenShift,here>>).  Key Points when showing on an OpenShift cluster: \n\n. Use OperatorHub to show how many different Operators there are.  \n. Install the `AMQStreams` or `Strimzi` Operator to add Kafka support (i.e. CRDs as we'll see) to the cluster \n. Once the operator is installed, pick a namespace to install a `Kafka` CR in\n. Show in the Developer Perspective the Kafka being created by the operator\n\nIn this section of the tutorial we'll be demonstrating these aspects of operators with a home grown toy \"Pizza Operator\"\n****\n\n== Preparation\n\n=== Namespace\n\nWe'll need a namespace where we'll house our operator deployment and our `CustomResources` upon which the operator will operate\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create namespace {section-namespace}\nkubectl config set-context --current --namespace={section-namespace}\n----\n\n=== Watch\n\nIf it's not open already, you'll want to have a terminal open (call it *{watch-terminal}*) to watch what's going on with the pods in our current namespace\n\n:section-k8s: crd\ninclude::partial$watching-pods-with-nodes.adoc[]\n\n=== Logs\n\nWe'll want to open a third terminal (call it *{log-terminal}*) where we'll use a tool called `stern` to watch the output of certain pods\n\ninclude::partial$open-terminal-in-editor-inset.adoc[]\n\n:stern-namespace: 
{section-namespace}\n:stern-pattern: p-pod\n:section-k8s: crd\n\ninclude::partial$stern-watch.adoc[]\n\n\n== CRDs\n\nCustom Resources extend the API\n\nCustom Controllers provide the functionality - continually maintains the desired state -  to monitor its state and reconcile the resource to match with the configuration\n\nhttps://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/\n\nhttps://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/\n\nCustom Resource Definitions (CRDs) in version 1.7\n\nCRDs extend the Kubernetes API.  We can see these api resources readily: \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl api-resources\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                              SHORTNAMES   APIVERSION                             NAMESPACED   KIND\nbindings                                       v1                                     true         Binding\ncomponentstatuses                 cs           v1                                     false        ComponentStatus\nconfigmaps                        cm           v1                                     true         ConfigMap\nendpoints                         ep           v1                                     true         Endpoint\n... #<.>\n----\n<.> This list is truncated\n\nIn the list you will find some of the resources we've already learned about, like `Deployments`\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl api-resources | grep Deployment\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ndeployments                       deploy       apps/v1                                true         Deployment\n----\n\n`CustomResourceDefinition` s are a sub-set of the Kubernetes `api-resources`.  
Let's see if there are any CRDs already installed in our cluster\n\n[#get-crds]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get crds --all-namespaces\n----\n\n[tabs]\n====\nMinikube::\n+\n--\nIf you are using something like minikube, you will find that there are no CRDs installed yet\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNo resources found\n----\n--\nOpenShift::\n+\n--\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                                              CREATED AT\nalertmanagerconfigs.monitoring.coreos.com                         2021-07-12T01:37:49Z\nalertmanagers.monitoring.coreos.com                               2021-07-12T01:37:53Z\napiservers.config.openshift.io                                    2021-07-12T01:37:06Z\nauthentications.config.openshift.io                               2021-07-12T01:37:06Z\nauthentications.operator.openshift.io                             2021-07-12T01:37:53Z\nbaremetalhosts.metal3.io                                          2021-07-12T01:38:25Z\nbuilds.config.openshift.io                                        2021-07-12T01:37:06Z\ncatalogsources.operators.coreos.com                               2021-07-12T01:37:49Z\ncloudcredentials.operator.openshift.io                            2021-07-12T01:37:10Z\n... #<.>\n----\n<.> This list has been truncated\n\n\nOpenShift is at its heart Kubernetes.  One of the main ways OpenShift extends Kubernetes is via CRDs, which explains why you find so many of them installed even on the back of a fresh installation.\n\n--\n====\n\n\n=== Example CRD\n\n:quick-open-file: pizza-crd.yaml\n\nLet's go ahead and create our own Custom Resource Definition.  Later on, this Custom Resources created from this definition will be something that our operator will operate upon.  
Take a look at `{quick-open-file}` to see what the CRD we'll be creating looks like\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\ninclude::example$pizza-crd.yaml[]\n----\n<1> This is a description that will be shown when somebody attempts to describe the CRD\n<2> This describes one of the values our `CustomResource` will have, namely, the (`array`) list of (`string`) toppings\n<3> This describes the second field our `CustomResource` can define in its spec, the (`string`) name of the sauce to use\n<4> This is the name that our CustomResources will have.  Sort of like `Deployment` or `Pod`\n\n[IMPORTANT]\n====\nMany CRDs include metadata about the fields that are exposed so that the CR can be validated by the Kubernetes API.  Prior to API version `v1` this was not enforced, after Kubernetes v1.22 all `CustomResources` will need to be `v1` and thus will need to define their object schema\n====\n\nNow let's go ahead and add this CRD to our cluster so that we can create `Pizza` Custom Resources.\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/pizzas/pizza-crd.yaml\n----\n\nWe should now be able to see that our CRD is part of our API\n\n[#get-pizzas-crds]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get crds | grep pizza\n----\n\nResults:\n\n[.console-output]\n[source,bash]\n----\nNAME                           CREATED AT\npizzas.mykubernetes.acme.org   2020-07-01T08:12:00Z\n----\n\nAnd since CRDs are a subset of all `api-resources`, we should now see `pizzas` as extending our cluster's api-resources: \n\n[#get-api-pizzas-crds]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl api-resources | grep pizzas\n----\n\nYields:\n\n[.console-output]\n[source,bash]\n----\npizzas                            pz           mykubernetes.acme.org          true         Pizza\n----\n\nFinally, since we defined the schema for our 
`CustomResourceDefinition` we've made it easier for people to consume our api.  CRDs hook into the `kubectl describe` functionality\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl explain pizza\n----\n\nGives us this helpful output\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nKIND:     Pizza\nVERSION:  mykubernetes.acme.org/v1\n\nDESCRIPTION:\n     A custom resource for making yummy pizzas #<.>\n\nFIELDS:\n   apiVersion   <string>\n     APIVersion defines the versioned schema of this representation of an\n     object. Servers should convert recognized schemas to the latest internal\n     value, and may reject unrecognized values. More info:\n     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n   kind <string>\n     Kind is a string value representing the REST resource this object\n     represents. Servers may infer this from the endpoint the client submits\n     requests to. Cannot be updated. In CamelCase. More info:\n     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n   metadata     <Object>\n     Standard object's metadata. More info:\n     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n   sauce        <string> #<.>\n     The name of the sauce to use on our pizza\n\n   toppings     <[]string> #<.>\n     List of toppings for our pizza'\n----\n<.> Notice that this matches our overall description of the pizza\n<.> This is from the Schema section of the CRD for sauce.  It says that it's a string.  The description comes from the description field\n<.> This is from the Schema section of the CRD for toppings.  It says that it's an array of strings.  
The description comes from the description field\n\n=== Deploying the Operator\n\nOur CRD is not limited to a particular namespace, but we do need a namespace to put our operator that is going to operate on our `pizza` CRs.  \n\nAt its heart, an operator is just an application, like the `myboot` application that we deployed previously.  The difference is that the operator knows how to interact with the Kubernetes API and watch for resources that it cares about.\n\n:quick-open-file: PizzaResourceWatcher.java\n\nThe Pizza operator that we're about to deploy was written in link:https://quarkus.io/[Quarkus^] using the link:https://github.com/java-operator-sdk/java-operator-sdk[java operator sdk^].  The code for this operator is present in this repo.  See the `{quick-open-file}` which is one of the key classes in the operator controller:\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,java,subs=\"+macros,+attributes\"]\n.{quick-open-file}\n----\npackage org.acme;\n\nimport io.fabric8.kubernetes.api.model.ContainerBuilder;\nimport io.fabric8.kubernetes.api.model.ObjectMetaBuilder;\nimport io.fabric8.kubernetes.api.model.Pod;\nimport io.fabric8.kubernetes.api.model.PodBuilder;\nimport io.fabric8.kubernetes.api.model.PodSpecBuilder;\nimport io.fabric8.kubernetes.client.KubernetesClient;\nimport io.fabric8.kubernetes.client.KubernetesClientException;\nimport io.fabric8.kubernetes.client.Watcher;\nimport io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;\nimport io.fabric8.kubernetes.client.dsl.Resource;\nimport io.quarkus.runtime.StartupEvent;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport javax.enterprise.event.Observes;\nimport javax.inject.Inject;\n\npublic class PizzaResourceWatcher {\n\n    @Inject\n    KubernetesClient defaultClient;\n\n    @Inject\n    NonNamespaceOperation<PizzaResource, PizzaResourceList, PizzaResourceDoneable, Resource<PizzaResource, PizzaResourceDoneable>> crClient;\n\n    void 
onStartup(@Observes StartupEvent event) {\n        System.out.println(\"Startup\");\n        crClient.watch(new Watcher<PizzaResource>() { //<.>\n            @Override\n            public void eventReceived(Action action, PizzaResource resource) {\n                System.out.println(\"Event \" + action.name());\n                if (action == Action.ADDED) {\n                    final String app = resource.getMetadata().getName();\n                    final String sauce = resource.getSpec().getSauce();\n                    final List<String> toppings = resource.getSpec().getToppings();\n                    final Map<String, String> labels = new HashMap<>();\n                    labels.put(\"app\", app);\n                    final ObjectMetaBuilder objectMetaBuilder = new ObjectMetaBuilder().withName(app + \"-pod\")\n                            .withNamespace(resource.getMetadata().getNamespace()).withLabels(labels);\n                    final ContainerBuilder containerBuilder = new ContainerBuilder().withName(\"pizza-maker\")\n                            .withImage(\"quay.io/lordofthejars/pizza-maker:1.0.0\").withCommand(\"/work/application\")\n                            .withArgs(\"--sauce=\" + sauce, \"--toppings=\" + String.join(\",\", toppings));\n                    final PodSpecBuilder podSpecBuilder = new PodSpecBuilder().withContainers(containerBuilder.build())\n                            .withRestartPolicy(\"Never\");\n                    final PodBuilder podBuilder = new PodBuilder().withMetadata(objectMetaBuilder.build())\n                            .withSpec(podSpecBuilder.build());\n                    final Pod pod = podBuilder.build();\n                    defaultClient.resource(pod).createOrReplace();\n\n                }\n            }\n\n            @Override\n            public void onClose(KubernetesClientException e) {\n            }\n        });\n    }\n\n}\n----\n<.> Notice that it's watching for our custom resource of 
`Pizza`\n\n[TIP]\n====\nThe creation of an operator controller is outside the scope of this tutorial.  If you'd like to learn more about creating operators with Quarkus, watch link:https://bit.ly/3kwJmcd[this 20 minute tutorial^]\n====\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/pizzas/pizza-deployment.yaml\n----\n\nSoon in your watch window (*{watch-terminal}*) you should see something like this\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                        READY   STATUS    RESTARTS   AGE\nquarkus-operator-example-5f5bf777bc-glfg9   1/1     Running   0          58s\n----\n--\n====\n\n[IMPORTANT]\n====\nWait until the deployment `STATUS` of the operator is `Running` before moving on to the next section\n====\n\n=== Make some Pizzas\n\nOnce our operator is running, it will be on the lookout for information in our `Pizza` Custom Resources and use it to (pretend to) make some pizzas by spinning up a pod configured with information from the Custom Resource instance.\n\nFor example, consider this instance of the Pizza `CustomResourceDefinition`:\n\n[.console-output]\n[source,yaml,subs=\"+macros,+attributes\"]\n----\ninclude::example$cheese-pizza.yaml[]\n----\n\nPay special attention to:\n\n* *Sauce*: `regular`\n* *Toppings*: `mozzarella`\n\nNow let's create this `CustomResource`:\n\n[#create-pizzas-crds]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/pizzas/cheese-pizza.yaml\nkubectl get pizzas\n----\n\n[.console-output]\n[source,bash]\n----\nNAME      AGE\ncheesep   4s\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe pizza cheesep\n----\n\n[.console-output]\n[source,bash,subs=\"+attributes\"]\n----\nName:         cheesep\nNamespace:    {section-namespace}\nLabels:       <none>\nAnnotations:  
kubectl.kubernetes.io/last-applied-configuration:\n                {\"apiVersion\":\"mykubernetes.acme.org/v1beta2\",\"kind\":\"Pizza\",\"metadata\":{\"annotations\":{},\"name\":\"cheesep\",\"namespace\":\"{section-namespace}\"},\"spec\":...\nAPI Version:  mykubernetes.acme.org/v1beta2\nKind:         Pizza\n...\n----\n\nAnd in our *{watch-terminal}* we should see how the Operator responds...\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                        READY   STATUS      RESTARTS   AGE\ncheesep-pod                                 0/1     Completed   0          3s\nquarkus-operator-example-5f5bf777bc-glfg9   1/1     Running     0          44m\n----\n\n--\n====\n\nAnd once the `cheesep-pod` completes we should see the following in *{log-terminal}*\n\n[tabs]\n====\n{log-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes,+macros\"]\n----\n+ cheesep-pod › pizza-maker\npass:[cheesep-pod pizza-maker __  ____  __  _____   ___  __ ____  ______ ]\npass:[cheesep-pod pizza-maker  --/ __ \\/ / / / _ | / _ \\/ //_/ / / / __/ ]\npass:[cheesep-pod pizza-maker  -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\\ \\   ]\npass:[cheesep-pod pizza-maker --\\___\\_\\____/_/ |_/_/|_/_/|_|\\____/___/   ]\ncheesep-pod pizza-maker 2021-07-19 08:16:26,113 INFO  [io.quarkus] (main) pizza-maker 1.0-SNAPSHOT (powered by Quarkus 1.4.0.CR1) started in 1.063s. \ncheesep-pod pizza-maker 2021-07-19 08:16:26,114 INFO  [io.quarkus] (main) Profile prod activated. 
\ncheesep-pod pizza-maker 2021-07-19 08:16:26,114 INFO  [io.quarkus] (main) Installed features: [cdi]\ncheesep-pod pizza-maker Doing The Base\ncheesep-pod pizza-maker Adding Sauce #regular#\ncheesep-pod pizza-maker Adding Toppings #[mozzarella]#\ncheesep-pod pizza-maker Baking\ncheesep-pod pizza-maker Baked\ncheesep-pod pizza-maker Ready For Delivery\ncheesep-pod pizza-maker 2021-07-19 08:16:26,615 INFO  [io.quarkus] (main) pizza-maker stopped in 0.000s\n----\n--\n====\n\nNotice that *Sauce* and *Toppings* matches what was specified in the `pizza` CustomResource\n\n=== Make more Pizzas\n\n:quick-open-file: meat-pizza.yaml\n\nTake a look at `{quick-open-file}` and `veggie-lovers.yaml` to show the sauce and toppings options there\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,yaml,subs=\"+macros,+attributes\"]\n.{quick-open-file}\n----\ninclude::example$meat-pizza.yaml[]\n----\n\nNow make the pizzas\n\n[#create-more-pizzas-crds]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/pizzas/meat-pizza.yaml\nkubectl apply -f apps/pizzas/veggie-lovers.yaml\nkubectl get pizzas --all-namespaces\n----\n\nPod watch in the *{watch-terminal}* should show\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                      READY  STATUS             AGE    NODE\ncheesep-pod                               0/1    Completed          8m46s  devnation\nmeatsp-pod                                0/1    ContainerCreating  8s     devnation\nquarkus-operator-example-fdb76c946-cwmnq  1/1    Running            14m    devnation\nveggiep-pod                               0/1    ContainerCreating  6s     devnation\n----\n--\n====\n\nAnd this notice in our log terminal *{log-terminal}*\n\n[tabs]\n====\n{log-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\n+ meatsp-pod › 
pizza-maker\npass:[meatsp-pod pizza-maker __  ____  __  _____   ___  __ ____  ______ ]\npass:[meatsp-pod pizza-maker  --/ __ \\/ / / / _ | / _ \\/ //_/ / / / __/ ]\npass:[meatsp-pod pizza-maker  -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\\ \\   ]\npass:[meatsp-pod pizza-maker --\\___\\_\\____/_/ |_/_/|_/_/|_|\\____/___/   ]\nmeatsp-pod pizza-maker 2021-07-19 08:24:48,015 INFO  [io.quarkus] (main) pizza-maker 1.0-SNAPSHOT (powered by Quarkus 1.4.0.CR1) started in 0.817s. \nmeatsp-pod pizza-maker 2021-07-19 08:24:48,016 INFO  [io.quarkus] (main) Profile prod activated. \nmeatsp-pod pizza-maker 2021-07-19 08:24:48,016 INFO  [io.quarkus] (main) Installed features: [cdi]\nmeatsp-pod pizza-maker Doing The Base\nmeatsp-pod pizza-maker Adding Sauce #extra# #<.>\nmeatsp-pod pizza-maker Adding Toppings #[mozzarella,pepperoni,sausage,bacon]#\nmeatsp-pod pizza-maker Baking\nmeatsp-pod pizza-maker Baked\nmeatsp-pod pizza-maker Ready For Delivery\nmeatsp-pod pizza-maker 2021-07-19 08:24:48,517 INFO  [io.quarkus] (main) pizza-maker stopped in 0.000s\n+ veggiep-pod › pizza-maker\npass:[veggiep-pod pizza-maker __  ____  __  _____   ___  __ ____  ______ ]\npass:[veggiep-pod pizza-maker  --/ __ \\/ / / / _ | / _ \\/ //_/ / / / __/ ]\npass:[veggiep-pod pizza-maker  -/ /_/ / /_/ / __ |/ , _/ ,< / /_/ /\\ \\   ]\npass:[veggiep-pod pizza-maker --\\___\\_\\____/_/ |_/_/|_/_/|_|\\____/___/   ]\nveggiep-pod pizza-maker 2021-07-19 08:24:55,289 INFO  [io.quarkus] (main) pizza-maker 1.0-SNAPSHOT (powered by Quarkus 1.4.0.CR1) started in 0.869s. \nveggiep-pod pizza-maker 2021-07-19 08:24:55,289 INFO  [io.quarkus] (main) Profile prod activated. 
\nveggiep-pod pizza-maker 2021-07-19 08:24:55,289 INFO  [io.quarkus] (main) Installed features: [cdi]\nveggiep-pod pizza-maker Doing The Base\nveggiep-pod pizza-maker Adding Sauce #extra# #<.>\nveggiep-pod pizza-maker Adding Toppings #[mozzarella,black olives]#\nveggiep-pod pizza-maker Baking\nveggiep-pod pizza-maker Baked\nveggiep-pod pizza-maker Ready For Delivery\nveggiep-pod pizza-maker 2021-07-19 08:24:55,790 INFO  [io.quarkus] (main) pizza-maker stopped in 0.000s\n----\n<.> Matches `sauce` and `toppings` on the meat-pizza CR\n<.> Matches `sauce` and `toppings` on the veggie-lovers CR\n--\n====\n\n=== Cleanup\n\nLet's cleanup everything in our namespace\n\n[#delete-pizzas-crds]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete all --all #<.>\nkubectl delete ns {section-namespace}\n----\n<.> Whilst namespaces do tend to automatically cleanup the resources within them, it's usually good practice to empty them out first to ensure you don't have any `finalizer` issues\n\n[.console-output]\n[source,bash]\n----\npizza.mykubernetes.acme.org \"cheesep\" deleted\npizza.mykubernetes.acme.org \"meatsp\" deleted\npizza.mykubernetes.acme.org \"veggiep\" deleted\npod \"cheesep-pod\" deleted\npod \"meatsp-pod\" deleted\npod \"quarkus-operator-example-fdb76c946-cwmnq\" deleted\npod \"veggiep-pod\" deleted\ndeployment.apps \"quarkus-operator-example\" deleted\nnamespace \"pizzahat\" deleted\n----\n\nAnd finally, let's remove our CRD (which was not bound to a specific namespace like `section-namespace`)\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete crd pizzas.mykubernetes.acme.org #<.>\n----\n<.> When deleting a crd we need to refer to it by its fully qualified name\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncustomresourcedefinition.apiextensions.k8s.io \"pizzas.mykubernetes.acme.org\" deleted\n----\n\n== Create some 
Kafka\n\nhttps://github.com/strimzi/strimzi-kafka-operator/blob/master/install/cluster-operator/040-Crd-kafka.yaml[Example CRD]\n\n=== Kafka for Minikube\n\nCreate a new namespace for this experiment:\n\n[#create-namespace-franz]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create namespace franz\nkubectl config set-context --current --namespace=franz\n----\n\nFor minikube, the instructions for installation can be found here:\n\nhttps://operatorhub.io/operator/strimzi-kafka-operator[Click Install]\n\nWhat follows were the instructions from a moment in time:\n\n[#minikube-install]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/0.14.1/install.sh | bash -s 0.14.1\nkubectl create -f https://operatorhub.io/install/strimzi-kafka-operator.yaml\n----\n\n=== Kafka for OpenShift\n\nimage:operator-hub-openshift.png[OperatorHub in OpenShift]\n\n=== Verify Install\n\n[#verify-install]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get csv -n operators\nkubectl get crds | grep kafka\n----\n\nStart a watch in another terminal:\n\n[#watch-pods]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -w\n----\n\nThen deploy the resource requesting a Kafka cluster:\n\n[#deploy-cluster]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/mykafka.yml\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                                          READY   STATUS    RESTARTS   AGE\nmy-cluster-entity-operator-66676cb9fb-fzckz   2/2     Running   0          29s\nmy-cluster-kafka-0                            2/2     Running   0          60s\nmy-cluster-kafka-1                            2/2     Running   0          60s\nmy-cluster-kafka-2                            2/2     Running   0          60s\nmy-cluster-zookeeper-0                       
 2/2     Running   0          92s\nmy-cluster-zookeeper-1                        2/2     Running   0          92s\nmy-cluster-zookeeper-2                        2/2     Running   0          92s\n----\n\nAnd you can get all information from Kafka:\n\n[#get-kafkas-crd]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get kafkas\n----\n\n[.console-output]\n[source,bash]\n----\nNAME         DESIRED KAFKA REPLICAS   DESIRED ZK REPLICAS\nmy-cluster   3                        3\n----\n\n=== Clean up\n\n[#clean-up]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete namespace {section-namespace}\nkubectl delete -f apps/pizzas/pizza-crd.yaml\nkubectl delete kafka my-cluster\nkubectl delete namespace franz\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/daemonset.adoc",
    "content": "= DaemonSets\ninclude::_attributes.adoc[]\n\nA DaemonSet ensures that all nodes run a copy of a Pod. \nAs nodes are added to the cluster, Pods are added to them automatically.\nWhen the nodes are deleted, they are not rescheduled but deleted.\n\nSo DaemonSet allows you to deploy a Pod across all nodes.\n\n== Preparation\n\ninclude::https://raw.githubusercontent.com/redhat-developer-demos/rhd-tutorial-common/master/minikube-multinode.adoc[]\n\n== DaemonSet\n\nDaemonSet is created using the Kubernetes `DaemonSet` resource:\n\n[source, yaml]\n----\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: quarkus-daemonset\n  labels:\n    app: quarkus-daemonset\nspec:\n  selector:\n    matchLabels:\n      app: quarkus-daemonset\n  template:\n    metadata:\n      labels:\n        app: quarkus-daemonset\n    spec:\n      containers:\n      - name: quarkus-daemonset\n        image: quay.io/rhdevelopers/quarkus-demo:v1\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/quarkus-daemonset.yaml\n\nkubectl get pods -o wide\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS    RESTARTS   AGE   IP           NODE            NOMINATED NODE   READINESS GATES\nquarkus-daemonset-jl2t5   1/1     Running   0          23s   10.244.0.2   multinode       <none>           <none>\nquarkus-daemonset-r64ql   1/1     Running   0          23s   10.244.1.2   multinode-m02   <none>           <none>\n----\n\nNotice that an instance of the Quarkus Pod is deployed to every node.\n\n=== Clean Up\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/quarkus-daemonset.yaml\n----"
  },
  {
    "path": "documentation/modules/ROOT/pages/exec.adoc",
    "content": "= Kubectl exec\n\nThe exec command allows you to \"shell into\" your pod and execute commands inside of that tiny linux machine that is running your application. \n\nYou can execute it this way:\n\n[.console-input]\n[source,bash]\n----\nkubectl exec -it {podname} -- /bin/bash\n----\n\nOr this way:\n\n[.console-input]\n[source,bash]\n----\nkubectl exec {podname} -- /somecommand\n----\n\nIn this section, we will be debugging an OOMKilled that is often seen when running Java inside of a container, inside of Kubernetes.\n\nMake sure the Spring Boot pod from the Resources chapter is still running:\n\n[#get-pods-exec]\n[.console-input]\n[source, bash]\n----\nkubectl get pods\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                     READY   STATUS    RESTARTS   AGE\nmyboot-d78fb6d58-69kl7   1/1     Running   2          32m\n----\n\nThen let's move inside the container by running `exec` into that running Pod:\n\n[#exec-pod-exec]\n[.console-input]\n[source, bash]\n----\nPODNAME=$(kubectl get pod  -l app=myboot -o name)\nkubectl exec -it $PODNAME -- /bin/bash\n----\n\nRun `ps` command to see the current running processes:\n\n[#exec-ps-exec]\n[.console-input]\n[source, bash]\n----\nps -ef\n----\n\n[.console-output]\n[source,bash]\n----\nUID          PID    PPID  C STIME TTY          TIME CMD\n1000610+       1       0  0 19:20 ?        00:00:00 /bin/sh -c java -XX:+PrintFlagsFinal -XX:+PrintGCDetails $JAVA\n1000610+       7       1  2 19:20 ?        
00:00:14 java -XX:+PrintFlagsFinal -XX:+PrintGCDetails -jar boot-demo-0\n1000610+      43       0  0 19:27 pts/0    00:00:00 /bin/bash\n1000610+      49      43  0 19:29 pts/0    00:00:00 ps -ef\n----\n\nExecute a `top` to get an overview of the memory:\n\n[#exec-top-exec]\n[.console-input]\n[source, bash]\n----\ntop\n----\n\n// The .no-query-replace tells the course ui to not attempt to replace tokens between % %\n[.no-query-replace]\n[.console-output]\n[source,bash]\n----\ntop - 19:29:34 up 2 days,  7:02,  0 users,  load average: 0.16, 0.13, 0.14\nTasks:   4 total,   1 running,   3 sleeping,   0 stopped,   0 zombie\n%Cpu(s):  2.8 us,  3.4 sy,  0.0 ni, 93.1 id,  0.1 wa,  0.3 hi,  0.3 si,  0.0 st\nKiB Mem : 15389256 total,  6438576 free,  2289352 used,  6661328 buff/cache\nKiB Swap:        0 total,        0 free,        0 used. 13142476 avail Mem\n\n    PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND\n      1 1000610+  20   0    4292    708    632 S   0.0  0.0   0:00.02 sh\n      7 1000610+  20   0 7511676 328704  16988 S   0.0  2.1   0:14.02 java\n     43 1000610+  20   0   19960   3644   3080 S   0.0  0.0   0:00.00 bash\n     50 1000610+  20   0   42672   3516   3080 R   0.0  0.0   0:00.00 top\n----\n\nGet the distro:\n\n[#exec-cat-release-exec]\n[.console-input]\n[source, bash]\n----\ncat /etc/os-release\n----\n\n[.console-output]\n[source,bash]\n----\nPRETTY_NAME=\"Debian GNU/Linux 9 (stretch)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"9\"\nVERSION=\"9 (stretch)\"\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"\n----\n\nCheck the free memory:\n\n[#exec-free-exec]\n[.console-input]\n[source, bash]\n----\nfree -h\n----\n\n[.console-output]\n[source,bash]\n----\n              total        used        free      shared  buff/cache   available\nMem:            14G        2.2G        6.1G         17M        6.4G         12G\nSwap:            0B       
   0B          0B\n----\n\nAnd now you might see part of the problem. \"free\" is not `cgroups` aware, it thinks it has access to the whole VMs memory.\n\nNo wonder the JVM reports a larger than accurate Max memory:\n\n[#curl-sysresources-exec]\n[.console-input]\n[source, bash]\n----\ncurl localhost:8080/sysresources\n----\n\n[.console-output]\n[source,bash]\n----\nMemory: 1324 Cores: 4\n----\n\n[NOTE]\n==== \nIf using Minikube, the cores are the core count provided by\n\n`minikube --profile devnation config set cpus 4`\n\nand the memory is a subset of the memory provided by\n\n`minikube --profile devnation config set memory 6144`\n====\n\nCheck your Java version:\n\n[#java-version-181-exec]\n[.console-input]\n[source, bash]\n----\njava -version\n----\n\n[.console-output]\n[source,bash]\n----\nopenjdk version \"1.8.0_181\"\nOpenJDK Runtime Environment (build 1.8.0_181-8u181-b13-2~deb9u1-b13)\nOpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)\n----\n\nAsk the JVM about its resource availability:\n\n[#java-version-181-settings-exec]\n[.console-input]\n[source, bash]\n----\njava -XshowSettings:vm -version\n----\n\n[.console-output]\n[source,bash]\n----\nVM settings:\n    Max. 
Heap Size (Estimated): 3.26G\n    Ergonomics Machine Class: server\n    Using VM: OpenJDK 64-Bit Server VM\n\nopenjdk version \"1.8.0_181\"\nOpenJDK Runtime Environment (build 1.8.0_181-8u181-b13-2~deb9u1-b13)\nOpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)\n----\n\nNow check the actual `cgroups` settings:\n\n[#cat-cgroup-exec]\n[.console-input]\n[source, bash]\n----\ncd /sys/fs/cgroup/memory/\ncat memory.limit_in_bytes\n----\n\n[.console-output]\n[source,bash]\n----\n419430400\n----\n\nAnd if you divide that 419430400 by 1024 and 1024, you end up with the 400 that was specified in the deployment YAML.\n\nIf you have a JVM of 1.8.0_131 or higher then you can try the experimental options\n\n[#java-version-131-settings-exec]\n[.console-input]\n[source, bash]\n----\njava -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -XshowSettings:vm -version\n----\n\n[.console-output]\n[source,bash]\n----\nVM settings:\n    Max. Heap Size (Estimated): 112.00M\n    Ergonomics Machine Class: server\n    Using VM: OpenJDK 64-Bit Server VM\n\nopenjdk version \"1.8.0_181\"\nOpenJDK Runtime Environment (build 1.8.0_181-8u181-b13-2~deb9u1-b13)\nOpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)\n----\n\nTo leave this pod, simply type `exit` and hit enter:\n\n[.console-input]\n[source, bash]\n----\nexit\n----\n\n== Clean Up\n\n[.console-input]\n[source,bash]\n----\nkubectl delete deployment myboot\nkubectl delete service myboot\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/index.adoc",
    "content": "= Kubernetes Tutorial\n\nWelcome to your Kubernetes Journey!\n\nYour journey contains four steps, each one of them in a different section:\n\ninclude::../nav.adoc[]\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/ingress.adoc",
    "content": "= Ingress\n\nMake sure you are in the correct namespace.\n\n== Enable Ingress Controller\n\nIn case of using `minikube` you need to enable NGINX Ingress controller.\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nminikube addons enable ingress -p devnation\n----\n\nWait a minute or so and verify that it has been deployed correctly:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -n ingress-nginx\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ningress-nginx-admission-create-lqfh2        0/1     Completed   0          6m28s\ningress-nginx-admission-patch-z2lzj         0/1     Completed   2          6m28s\ningress-nginx-controller-69ccf5d9d8-95xgp   1/1     Running     0          6m28s\n----\n\n== Deploy Application\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: quarkus-demo-deployment\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: quarkus-demo\n  template:\n    metadata:\n      labels:\n        app: quarkus-demo\n        env: dev\n    spec:\n      containers:\n      - name: quarkus-demo\n        image: quay.io/rhdevelopers/quarkus-demo:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\nEOF\n----\n\nExpose the service:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl expose deployment quarkus-demo-deployment --type=NodePort --port=8080\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get service quarkus-demo-deployment\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                      TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE\nquarkus-demo-deployment   NodePort   10.105.106.66   <none>        8080:30408/TCP   11s\n----\n\n:section-k8s: ingress\n:service-exposed: 
quarkus-demo-deployment\ninclude::partial$env-curl.adoc[]\n\n== Configuring Ingress\n\nAn Ingress resource is defined as:\n\n[source, yaml]\n----\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: example-ingress\n  annotations:\n    nginx.ingress.kubernetes.io/rewrite-target: /$1\nspec:\n  rules:\n  - host: kube-devnation.info\n    http:\n      paths:\n      - pathType: Prefix\n        path: /\n        backend:\n          service: \n            name: quarkus-demo-deployment\n            port:\n              number: 8080\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/demo-ingress.yaml\n----\n\nGet the information from the Ingress resource:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get ingress\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME              CLASS    HOSTS                 ADDRESS          PORTS   AGE\nexample-ingress   <none>   kube-devnation.info   192.168.99.115   80      68s\n----\n\nYou need to wait until the address field is set.\nIt might take some minutes.\n\nModify the `/etc/hosts` to point the hostname to the Ingress address.\n\nIMPORTANT: If you are using minikube, use the output of `minikube ip -p devnation` as the address because the Ingress IP is an internal IP.\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n./etc/hosts\n----\n172.17.0.15 kube-devnation.info\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl kube-devnation.info\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo-deployment-8cf45f5c8-qmzwl:1\n----\n\n== Second Deployment\n\nDeploy a second version of the service:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mynode-deployment\nspec:\n  replicas: 1\n  selector:\n 
   matchLabels:\n      app: mynode\n  template:\n    metadata:\n      labels:\n        app: mynode\n    spec:\n      containers:\n      - name: mynode\n        image: quay.io/rhdevelopers/mynode:v1\n        ports:\n        - containerPort: 8000\nEOF\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl expose deployment mynode-deployment --type=NodePort --port=8000\n----\n\n== Ingress Update\n\nThen you need to update the Ingress resource with the new `path`:\n\n[source, yaml]\n----\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n  name: example-ingress\n  annotations:\n    nginx.ingress.kubernetes.io/rewrite-target: /$1\nspec:\n  rules:\n  - host: kube-devnation.info\n    http:\n      paths:\n      - path: /\n        backend:\n          serviceName: quarkus-demo-deployment\n          servicePort: 8080\n      - path: /v2\n        backend:\n          serviceName: mynode-deployment\n          servicePort: 8000\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/demo-ingress-2.yaml\n----\n\nTest it:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl kube-devnation.info\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo-deployment-8cf45f5c8-qmzwl:2\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl kube-devnation.info/v2\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNode Bonjour on mynode-deployment-77c7bf857d-5nfl4 0\n----\n\n\n== Clean Up\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete deployment mynode-deployment\nkubectl delete service mynode-deployment\n\nkubectl delete deployment quarkus-demo-deployment\nkubectl delete service quarkus-demo-deployment\n\nkubectl delete -f apps/kubefiles/demo-ingress-2.yaml\n----"
  },
  {
    "path": "documentation/modules/ROOT/pages/installation.adoc",
    "content": "= Installation & Setup\ninclude::_attributes.adoc[]\n\n[#tutorial-all-local]\n== CLI tools\n\ninclude::partial$prerequisites-kubernetes.adoc[]\n|===\n\ninclude::partial$optional-requisites.adoc[]\n|===\n\n[#download-tutorial-sources]\n== Download Tutorial Sources\n\nBefore we start setting up the environment, let's clone the tutorial sources and set the `TUTORIAL_HOME` environment variable to point to the root directory of the tutorial:\n\n:tutorial-url: {github-repo}\n:folder: kubernetes-tutorial\ninclude::https://raw.githubusercontent.com/redhat-developer-demos/rhd-tutorial-common/master/download-sources.adoc[]\n\n[IMPORTANT,subs=\"attributes+,+macros\"]\n====\n\nThis tutorial was developed and tested with:\n\n- Minikube `{minikube-version}`\n- OpenShift `{openshift-version}`\n====\n\n[#install-minikube]\n=== Install Minikube\n\ninclude::https://raw.githubusercontent.com/redhat-developer-demos/rhd-tutorial-common/master/install-minikube.adoc[]\n\n[#start-kubernetes]\n=== Start Kubernetes\n\nThere are a series of commands as steps that allow you to get started with minikube. The following section shows how to start minikube with minimal and required configurations:\n\n\n:profile: devnation\ninclude::https://raw.githubusercontent.com/redhat-developer-demos/rhd-tutorial-common/master/kubernetes-setup.adoc[]\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/jobs-cronjobs.adoc",
    "content": "= Jobs & CronJobs\ninclude::_attributes.adoc[]\n:watch-terminal: Terminal 2\n\nMost of the time, you are using Kubernetes as a platform to run \"long\" processes where their purpose is to serve responses for a given incoming request.\n\nBut Kubernetes also lets you run processes that their purpose is to execute some logic (i.e. update database, batch processing, ...) and die.\n\nKubernetes Jobs are tasks that execute some logic once. \n\nKubernetes CronJobs are Jobs that are repeated following a Cron pattern.\n\n== Preparation\n\n=== Namespace Setup\n\nMake sure you are in the correct namespace:\n\n:section-k8s: resource\n:set-namespace: myspace\n\ninclude::partial$namespace-setup-tip.adoc[]\n\ninclude::partial$set-context.adoc[]\n\n=== Watch Terminal\n\nTo be able to observe what's going on, let's open another terminal (*{watch-terminal}*) and `watch` what happens as we run our different jobs\n\n:section-k8s: cronjobs\n\ninclude::partial$watching-pods-with-nodes.adoc[]\n\n== Jobs\n\n:quick-open-file: whalesay-job.yaml\n\nA Job is created using the Kubernetes `Job` resource.  To examine one, open the `{quick-open-file}`.  
Here are the interesting aspects of this file:\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\ninclude::example$whalesay-job.yaml[]\n----\n<.> The name of the job will be used as the value of a label `job-name` on any pods that are spawned by this job definition.\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/whalesay-job.yaml\n----\n--\n====\n\nThis should yield the following output (in successive refreshes) in *{watch-terminal}*\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash]\n----\nNAME                 READY  STATUS             AGE  NODE\nwhale-say-job-m8vxt  0/1    ContainerCreating  14s  devnation-m02\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                 READY  STATUS     AGE  NODE\nwhale-say-job-m8vxt  1/1    Running    80s  devnation-m02\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                 READY  STATUS     AGE  NODE\nwhale-say-job-m8vxt  0/1    Completed  85s  devnation-m02\n----\n--\n====\n\n\nYou can get `jobs` as any other Kubernetes resource:\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get jobs\n----\n\n[.console-output]\n[source,bash]\n----\nNAME            COMPLETIONS   DURATION   AGE\nwhale-say-job   1/1           20s        36s\n----\n--\n====\n\nSince the job is run by a pod, to get the output of the `job` execution, we need only to get the output of the pod's logs:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl logs \\\n  -l job-name=whale-say-job \\#<.>\n  --tail=-1 #<.>\n----\n<.> This is allowing us to look for any pod labeled with `job-name` (see above) set to `whale-say-job`\n<.> `--tail` tells the log command how many lines from the end of the (pod's) log to return.  
So that we can see all the whimsy in this job pod's message, we set this to `-1` to see all the linesfootnote:[Normally --tail is set to -1 by default, but that's only when requesting logs from a _single specific resource_.  When there is the potential to return multiple resources' logs (as is the case here when we're asking for logs by label) the number of lines returned from each resource's logs are limited to 10 by default]\n\n[.console-output]\n[source,bash]\n----\n _________________\n< Hello DevNation >\n -----------------\n    \\\n     \\\n      \\\n                    ##        .\n              ## ## ##       ==\n           ## ## ## ##      ===\n       /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ ===\n  ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~\n       \\______ o          __/\n        \\    \\        __/\n          \\____\\______/\n----\n\n=== Clean Up\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/whalesay-job.yaml\n----\n--\n====\n\n\n== CronJobs\n\n:quick-open-file: whalesay-cronjob.yaml\n\nA CronJob is defined using the Kubernetes `CronJob` resource.  The name `cronjob` comes from Linux and represents some sort of batch process that is scheduled to run once or repeatedly.  This concept has been translated into Kubernetes as we can see in the `{quick-open-file}` file: \n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\ninclude::example$whalesay-cronjob.yaml[]\n----\n<.> This string represents a job is executed every minute.\n<.> Here we specify our own additional label we'd like applied to `jobs` and `pods` created by the `cronjob`.  
Even though the `job-name` label will still exist, it will contain a guid on every invocation, meaning we can't predict what the value is a priori\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/whalesay-cronjob.yaml\n----\n--\n====\n\nBut then if we look to our watch window in *{watch-terminal}*\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash]\n----\nNAME                  READY   STATUS      RESTARTS   AGE\n----\n\n--\n====\n\nNo Pod is running as CronJob is setting up (and is checked only once every 10 seconds or so, see warning below)\n\nWhile we're waiting for our cronjob to run, we can use *Terminal 1* to watch how the `cronjob` is changing:\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get cronjobs -w #<.>\n----\n<.> the `-w` flag says to watch the output (sort of like what we're doing in the *{watch-terminal}*) but only post back when the state of the observed resource's (in this case the `cronjob`) state changes.\n\nHere is some representative output after waiting almost 3 minutes (notice the job restarts)\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nNAME                SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE\nwhale-say-cronjob   * * * * *   False     #1#        0s              #20s# #<.>\nwhale-say-cronjob   * * * * *   False     0        31s             51s\nwhale-say-cronjob   * * * * *   False     #1#        0s              #80s# #<.>\nwhale-say-cronjob   * * * * *   False     0        23s             103s\nwhale-say-cronjob   * * * * *   False     #1#        1s              #2m21s#\n----\n<.> The first invocation took a while to start, this was not a function of the `cronjob` schedule\n<.> Notice that the next time the job is active is about 60s after the first job was active (by AGE).  
And the job after that has an age of ~60s after that\n--\n====\n\nYou'll notice that every time the cronjob moves to ACTIVE (see highlight above), you should see the following in *{watch-terminal}*:\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash]\n----\nNAME                              READY  STATUS     AGE  NODE\nwhale-say-cronjob-27108480-2ws6k  0/1    Completed  46s  devnation-m02\n----\n--\n====\n\n[WARNING]\n====\nPer the link:https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[official Kubernetes documentation]: A cron job creates a job object about once per execution time of its schedule. We say \"about\" because there are certain circumstances where two jobs might be created, or no job might be created. We attempt to make these rare, but do not completely prevent them. Therefore, jobs should be idempotent.\n====\n\nLet's examine our cronjob by using the `describe` subcommand.  Use kbd:[CTRL+c] to cancel the `kubectl get cronjobs -w` command and replace with the following:\n\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe cronjobs\n----\n\nYou should then see something like this\n\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nName:                          whale-say-cronjob\nNamespace:                     myspace\nLabels:                        <none>\nAnnotations:                   <none>\nSchedule:                      * * * * *\nConcurrency Policy:            Allow\nSuspend:                       False\n#Successful Job History Limit:  3# #<.>\nFailed Job History Limit:      1\nStarting Deadline Seconds:     <unset>\nSelector:                      <unset>\nParallelism:                   <unset>\nCompletions:                   <unset>\nPod Template:\n  Labels:  #job-type=whale-say#\n  Containers:\n   whale-say-container:\n    Image:      docker/whalesay\n    Port:       <none>\n    Host Port:  <none>\n    Command:\n      cowsay\n      Hello DevNation\n    
Environment:     <none>\n    Mounts:          <none>\n  Volumes:           <none>\n#Last Schedule Time:  Sat, 17 Jul 2021 08:06:00 +0000# #<.>\nActive Jobs:         whale-say-cronjob-27108486\nEvents:\n  Type    Reason            Age    From                Message\n  ----    ------            ----   ----                -------\n  Normal  SuccessfulCreate  6m21s  cronjob-controller  Created job whale-say-cronjob-27108480\n  Normal  SawCompletedJob   6m1s   cronjob-controller  Saw completed job: whale-say-cronjob-27108480, status: Complete\n  Normal  SuccessfulCreate  5m21s  cronjob-controller  Created job whale-say-cronjob-27108481\n  Normal  SawCompletedJob   4m56s  cronjob-controller  Saw completed job: whale-say-cronjob-27108481, status: Complete\n  Normal  SuccessfulCreate  4m21s  cronjob-controller  Created job whale-say-cronjob-27108482\n  Normal  SawCompletedJob   3m56s  cronjob-controller  Saw completed job: whale-say-cronjob-27108482, status: Complete\n  Normal  SuccessfulCreate  3m21s  cronjob-controller  Created job whale-say-cronjob-27108483\n  Normal  SawCompletedJob   2m48s  cronjob-controller  Saw completed job: whale-say-cronjob-27108483, status: Complete\n  Normal  SuccessfulDelete  2m46s  cronjob-controller  Deleted job whale-say-cronjob-27108480\n  Normal  SuccessfulCreate  2m20s  cronjob-controller  Created job whale-say-cronjob-27108484\n  Normal  SawCompletedJob   104s   cronjob-controller  Saw completed job: whale-say-cronjob-27108484, status: Complete\n  Normal  SuccessfulDelete  101s   cronjob-controller  Deleted job whale-say-cronjob-27108481\n  Normal  SuccessfulCreate  81s    cronjob-controller  Created job whale-say-cronjob-27108485\n  Normal  SawCompletedJob   54s    cronjob-controller  Saw completed job: whale-say-cronjob-27108485, status: Complete\n  Normal  SuccessfulDelete  52s    cronjob-controller  Deleted job whale-say-cronjob-27108482\n  Normal  SuccessfulCreate  21s    cronjob-controller  Created job 
whale-say-cronjob-27108486\n  Normal  SawCompletedJob   1s     cronjob-controller  Saw completed job: whale-say-cronjob-27108486, status: Complete\n----\n<.> Kubernetes cleans up jobs after a certain amount of time\n<.> Notice that the _Last Schedule Time_ shows the last time a job was executed.\n\nIt is important to notice that a CronJob creates a `job` (which, in turn, creates pods) whenever the schedule is activated:\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get jobs\n----\n\nWith example output after the cronjob has been around for more than 3 minutes:\n\n[.console-output]\n[source,bash]\n----\nNAME                         COMPLETIONS   DURATION   AGE\nwhale-say-cronjob-27108487   1/1           19s        2m37s\nwhale-say-cronjob-27108488   1/1           20s        97s\nwhale-say-cronjob-27108489   1/1           21s        37s\n----\n--\n====\n\nFinally, we can see the effect of job history by logging for all our jobs\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl logs \\\n  -l job-type=whale-say \\#<.>\n  --tail=-1\n----\n<.> This time we're looking to get the logs on anything created with the label `job-type` (our custom label from above) set to `whale-say`\n\n.NOTE\n****\nIt would be less specific but we _could_ find out whale job logs without a custom label _by instead not looking to match the value on the label_ like this: \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nkubectl logs #-l job-name# --tail=-1\n----\n\nThis basically states that we should match any pod with a label named `job-name`\n\n****\n\n[.console-output]\n[source,bash]\n----\n _________________ \n< Hello DevNation >\n ----------------- \n    \\\n     \\\n      \\     \n                    ##        .            
\n              ## ## ##       ==            \n           ## ## ## ##      ===            \n       /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ ===        \n  ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~   \n       \\______ o          __/            \n        \\    \\        __/             \n          \\____\\______/   \n _________________ \n< Hello DevNation >\n ----------------- \n    \\\n     \\\n      \\     \n                    ##        .            \n              ## ## ##       ==            \n           ## ## ## ##      ===            \n       /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ ===        \n  ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~   \n       \\______ o          __/            \n        \\    \\        __/             \n          \\____\\______/   \n _________________ \n< Hello DevNation >\n ----------------- \n    \\\n     \\\n      \\     \n                    ##        .            \n              ## ## ##       ==            \n           ## ## ## ##      ===            \n       /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ ===        \n  ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~   \n       \\______ o          __/            \n        \\    \\        __/             \n          \\____\\______/   \n----\n--\n====\n\n=== Clean Up\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/whalesay-cronjob.yaml\n----\n--\n====\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/kubectl.adoc",
    "content": "= kubectl: The Kubernetes Client\n\n[[talk]]\n== Talk to your Cluster\n[#kubectl-view-config]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\necho $KUBECONFIG\nkubectl config view\n----\n\n\n[[view-nodes]]\n== View Nodes\n\n[#kubectl-get-nodes]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get nodes\nkubectl get nodes --show-labels\nkubectl get namespaces\n----\n\n[[view-pods]]\n== View out-of-the-box Pods\n\nYour Kubernetes vendor likely includes many pods out-of-the-box:\n\n[#kubectl-get-pods]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods --all-namespaces\nkubectl get pods --all-namespaces --show-labels\nkubectl get pods --all-namespaces -o wide\n----\n\n[[deploy-app]]\n== Deploy Something\n\nCreate a Namespace and Deploy something:\n\n[#kubectl-deploy-app]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create namespace mystuff\nkubectl config set-context --current --namespace=mystuff\n\nkubectl create deployment myapp --image=quay.io/rhdevelopers/quarkus-demo:v1\n----\n\n[[monitor-events]]\n== While monitoring Events\n\n[#kubectl-get-events]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get events -w --sort-by=.metadata.creationTimestamp\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nLAST SEEN   TYPE     REASON              OBJECT                        MESSAGE\n<unknown>   Normal   Scheduled           pod/myapp-5dcbf46dfc-ghrk4    Successfully assigned mystuff/myapp-5dcbf46dfc-ghrk4 to g\ncp-5xldg-w-a-5ptpn.us-central1-a.c.ocp42project.internal\n29s         Normal   SuccessfulCreate    replicaset/myapp-5dcbf46dfc   Created pod: myapp-5dcbf46dfc-ghrk4\n29s         Normal   ScalingReplicaSet   deployment/myapp              Scaled up replica set myapp-5dcbf46dfc to 1\n21s         Normal   Pulling             pod/myapp-5dcbf46dfc-ghrk4    Pulling image 
\"quay.io/burrsutter/quarkus-demo:1.0.0\"\n15s         Normal   Pulled              pod/myapp-5dcbf46dfc-ghrk4    Successfully pulled image \"quay.io/burrsutter/quarkus-dem\no:1.0.0\"\n15s         Normal   Created             pod/myapp-5dcbf46dfc-ghrk4    Created container quarkus-demo\n15s         Normal   Started             pod/myapp-5dcbf46dfc-ghrk4    Started container quarkus-demo\n----\n\n[[created-objects]]\n== Created Objects\n\n=== Deployments\n[#kubectl-get-deployments]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get deployments\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME    READY   UP-TO-DATE   AVAILABLE   AGE\nmyapp   1/1     1            1           95s\n----\n\n=== Replicasets\n[#kubectl-get-replicasets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get replicasets\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME               DESIRED   CURRENT   READY   AGE\nmyapp-5dcbf46dfc   1         1         1       2m1s\n----\n\n=== Pods\n\n[#kubectl-get-podsx]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods --show-labels\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                     READY   STATUS    RESTARTS   AGE     LABELS\nmyapp-5dcbf46dfc-ghrk4   1/1     Running   0          2m18s   app=myapp,pod-template-hash=5dcbf46dfc\n----\n\n=== Logs\n[#kubectl-logs]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl logs -l app=myapp\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\n2020-03-22 14:41:30,497 INFO  [io.quarkus] (main) Quarkus 0.22.0 started in 0.021s. 
Listening on: http://0.0.0.0:8080\n2020-03-22 14:41:30,497 INFO  [io.quarkus] (main) Installed features: [cdi, resteasy]\n----\n\n== Expose a Service\n[#kubectl-expose]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl expose deployment myapp --port=8080 --type=LoadBalancer\n----\n\n=== while watching Services\n\n:section-k8s: kubectl\ninclude::partial$watching-services.adoc[]\n\n== Talk to the App\n\n:section-k8s: kubectl\n:service-exposed: myapp\ninclude::partial$env-curl.adoc[]\n\n== Scale the App\n\nOpen three Terminal Windows.\n\n=== Terminal 1\n[#watch-pods]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -w\n----\n\n=== Terminal 2\n\n:service-exposed: myapp\n\ninclude::partial$env-curl.adoc[]\n\nPoll the endpoint:\n\n[#poll-endpoint]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nwhile true\ndo curl $IP:$PORT\nsleep {curl-loop-sleep-time}\ndone\n----\n\nResults of the polling:\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-ghrk4:289\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-ghrk4:290\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-ghrk4:291\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-ghrk4:292\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-ghrk4:293\n----\n\n=== Terminal 3\n\nChange replicas:\n\n[#change-replicas]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl scale deployment myapp --replicas=3\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                     READY   STATUS              RESTARTS   AGE\nmyapp-5dcbf46dfc-6sn2s   0/1     ContainerCreating   0          4s\nmyapp-5dcbf46dfc-ghrk4   1/1     Running             0          5m32s\nmyapp-5dcbf46dfc-z6hqw   0/1     ContainerCreating   0          4s\n----\n\nStart a rolling update by changing the 
image:\n\n[#set-image-myboot-v1]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl set image deployment/myapp quarkus-demo=quay.io/rhdevelopers/myboot:v1\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-6sn2s:188\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-z6hqw:169\nAloha from Spring Boot! 0 on myapp-58b97dbd95-vxd87\nAloha from Spring Boot! 1 on myapp-58b97dbd95-vxd87\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-6sn2s:189\nSupersonic Subatomic Java with Quarkus myapp-5dcbf46dfc-z6hqw:170\nAloha from Spring Boot! 2 on myapp-58b97dbd95-vxd87\n----\n\nAnnotate the change cause for the records (documentation purpose):\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl annotate deployment/myapp kubernetes.io/change-cause=\"Reverting to old SpringBoot version\" --overwrite\n----\n\nExplore the revision history:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl rollout history deployment/myapp\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ndeployment.apps/myapp\nREVISION  CHANGE-CAUSE\n1         <none>\n2         Reverting to old SpringBoot version\n----\n\nList the ReplicaSets for the deployment:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get rs\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME               DESIRED   CURRENT   READY   AGE\nmyapp-65c9d96df4   3         3         3       2m\nmyapp-67fc4b6f94   0         0         0       8m\n----\n\nTo revert to the most recent successful version (e.g., from Revision 2 back to Revision 1), use the undo command:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl rollout undo deployment/myapp\n----\n\n[TIP]\n====\nYou can specify the revision number by adding the following option: 
`--to-revision=1`\n====\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nAloha from Spring Boot! 16 on myapp-fc6b78bb-d495j\nSupersonic Subatomic Java with Quarkus myapp-76d84b5f46-jlzs7:1\nSupersonic Subatomic Java with Quarkus myapp-76d84b5f46-jlzs7:2\nAloha from Spring Boot! 17 on myapp-fc6b78bb-d495j\nAloha from Spring Boot! 18 on myapp-fc6b78bb-d495j\nSupersonic Subatomic Java with Quarkus myapp-76d84b5f46-jlzs7:3\nAloha from Spring Boot! 19 on myapp-fc6b78bb-d495j\nSupersonic Subatomic Java with Quarkus myapp-76d84b5f46-jlzs7:4\n----\n\n=== Clean Up\n[#delete-namespace]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete namespace mystuff\nkubectl config set-context --current --namespace=default\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/live-ready.adoc",
    "content": "= Liveness & Readiness\n\nMake sure you are in the correct namespace:\n\n:section-k8s: liveready\n:set-namespace: myspace\n\ninclude::partial$set-context.adoc[]\n\nMake sure nothing else is deployed:\n\n[#no-resources-live-ready]\n[.console-input]\n[source, bash]\n----\nkubectl get all\n----\n\n[.console-output]\n[source.bash]\n----\nNo resources found in myspace namespace.\n----\n\n:quick-open-file: myboot-deployment-live-ready.yml\n\nNow we're going to deploy our application with a Liveness and Readiness probe set.  Take a look at `{quick-open-file}`\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,yaml]\n.{quick-open-file}\n----\ninclude::example$myboot-deployment-live-ready.yml[]\n----\n\nNow apply this deployment with the following command\n\n[#create-app-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-live-ready.yml\n----\n\nDescribe the deployment:\n\n:describe-deployment-name: myboot\n:section-k8s: live-ready\n\ninclude::partial$describe-deployment.adoc[]\n\n[.console-output]\n[source.bash]\n----\n...\n    Image:      quay.io/rhdevelopers/myboot:v1\n    Port:       8080/TCP\n    Host Port:  0/TCP\n    Limits:\n      cpu:     1\n      memory:  400Mi\n    Requests:\n      cpu:        250m\n      memory:     300Mi\n    Liveness:     http-get http://:8080/ delay=10s timeout=2s period=5s #success=1 #failure=3\n    Readiness:    http-get http://:8080/health delay=10s timeout=1s period=3s #success=1 #failure=3\n...    
\n----\n\nDeploy a Service:\n\n[#deploy-service-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-service.yml\n----\n\n:section-k8s: liveready\n:service-exposed: myboot\ninclude::partial$env-curl.adoc[]\n\nAnd run loop script:\n\ninclude::partial$loop.adoc[]\n\nChange the image:\n\n[#change-deployment-v2-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl set image deployment/myboot myboot=quay.io/rhdevelopers/myboot:v2\n----\n\nAnd notice the error free rolling update:\n\n[.console-output]\n[source.bash]\n----\nAloha from Spring Boot! 131 on myboot-845968c6ff-k4rvb\nAloha from Spring Boot! 134 on myboot-845968c6ff-9wvt9\nAloha from Spring Boot! 122 on myboot-845968c6ff-9824z\nBonjour from Spring Boot! 0 on myboot-8449d5468d-m88z4\nBonjour from Spring Boot! 1 on myboot-8449d5468d-m88z4\nAloha from Spring Boot! 135 on myboot-845968c6ff-9wvt9\nAloha from Spring Boot! 133 on myboot-845968c6ff-k4rvb\nAloha from Spring Boot! 137 on myboot-845968c6ff-9wvt9\nBonjour from Spring Boot! 
3 on myboot-8449d5468d-m88z4\n----\n\nLook at the Endpoints to see which pods are part of the Service:\n\n[#get-endpoints-before]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get endpoints myboot -o json | jq '.subsets[].addresses[].ip'\n----\n\nThese are the Pod IPs that have passed their readiness probes:\n\n[.console-output]\n[source.bash]\n----\n\"10.129.2.40\"\n\"10.130.2.37\"\n\"10.130.2.38\"\n----\n\n== Readiness Probe\n\nExec into a single Pod and change its readiness flag:\n\n[#misbehave-app-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl exec -it myboot-845968c6ff-k5lcb -- /bin/bash\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl localhost:8080/misbehave\nexit\n----\n\nSee that the pod is no longer Ready:\n\n[.console-output]\n[source.bash]\n----\nNAME                      READY   STATUS    RESTARTS   AGE\nmyboot-845968c6ff-9wshg   1/1     Running   0          11m\nmyboot-845968c6ff-k5lcb   0/1     Running   0          12m\nmyboot-845968c6ff-zsgx2   1/1     Running   0          11m\n----\n\nNow check the Endpoints:\n\n[#get-endpoints-after]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get endpoints myboot -o json | jq '.subsets[].addresses[].ip'\n----\n\nAnd that pod is now missing from the Service's loadbalancer:\n\n[.console-output]\n[source.bash]\n----\n\"10.130.2.37\"\n\"10.130.2.38\"\n----\n\nWhich is also self-evident in the curl loop:\n\n[.console-output]\n[source.bash]\n----\nAloha from Spring Boot! 845 on myboot-845968c6ff-9wshg\nAloha from Spring Boot! 604 on myboot-845968c6ff-zsgx2\nAloha from Spring Boot! 
846 on myboot-845968c6ff-9wshg\n----\n\n== Liveness Probe\n\n[#change-deployment-v3-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl set image deployment/myboot myboot=quay.io/rhdevelopers/myboot:v3\n----\n\nLet the rollout finish to completion across all 3 replicas:\n\n[.console-output]\n[source.bash]\n----\nkubectl get pods -w\nNAME                      READY   STATUS    RESTARTS   AGE\nmyboot-56659c9d69-6sglj   1/1     Running   0          2m2s\nmyboot-56659c9d69-mdllq   1/1     Running   0          97s\nmyboot-56659c9d69-zjt6q   1/1     Running   0          72s\n----\n\nAnd as seen in the curl loop/poller:\n\n[.console-output]\n[source.bash]\n----\nJambo from Spring Boot! 40 on myboot-56659c9d69-mdllq\nJambo from Spring Boot! 26 on myboot-56659c9d69-zjt6q\nJambo from Spring Boot! 71 on myboot-56659c9d69-6sglj\n----\n\nEdit the Deployment to point to the /alive URL:\n\ninclude::partial$tip_vscode_kube_editor.adoc[]\n\n[#change-liveness-v3-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl edit deployment myboot\n----\n\nAnd change the liveness probe:\n\n[.console-output]\n[source.bash]\n----\n...\n    spec:\n      containers:\n      - image: quay.io/rhdevelopers/myboot:v3\n        imagePullPolicy: Always\n        livenessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /alive\n            port: 8080\n            scheme: HTTP\n          initialDelaySeconds: 10\n          periodSeconds: 5\n          successThreshold: 1\n          timeoutSeconds: 2\n        name: myboot\n...\n----\n\nSave and close the editor, allowing that change to rollout:\n\n[.console-input]\n[source,bash]\n----\nkubectl get pods -w\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS        RESTARTS   AGE\nmyboot-558b4f8678-nw762   1/1     Running       0          59s\nmyboot-558b4f8678-qbrgc   1/1     Running       0          
81s\nmyboot-558b4f8678-z7f9n   1/1     Running       0          36s\n----\n\nNow pick one of the pods, `exec` into it and shoot it:\n\n[#shot-v3-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl exec -it myboot-558b4f8678-qbrgc -- /bin/bash\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl localhost:8080/shot\n----\n\nAnd you will see it get restarted:\n\n[.console-output]\n[source.bash]\n----\nNAME                      READY   STATUS    RESTARTS   AGE\nmyboot-558b4f8678-nw762   1/1     Running   0          4m7s\nmyboot-558b4f8678-qbrgc   1/1     Running   1          4m29s\nmyboot-558b4f8678-z7f9n   1/1     Running   0          3m44s\n----\n\nPlus, your exec will be terminated:\n\n[.console-input]\n[source,bash]\n----\nkubectl exec -it myboot-558b4f8678-qbrgc -- /bin/bash\n----\n\n[.console-output]\n[source.bash]\n----\ncurl localhost:8080/shot\n----\n\n[.console-output]\n[source.bash]\n----\nI have been shot in the head1000610000@myboot-558b4f8678-qbrgc:/app$ command terminated with exit code 137\n----\n\nAnd your end-users will not see any errors:\n\n[.console-output]\n[source.bash]\n----\nJambo from Spring Boot! 174 on myboot-558b4f8678-z7f9n\nJambo from Spring Boot! 11 on myboot-558b4f8678-qbrgc\nJambo from Spring Boot! 12 on myboot-558b4f8678-qbrgc\nJambo from Spring Boot! 206 on myboot-558b4f8678-nw762\nJambo from Spring Boot! 207 on myboot-558b4f8678-nw762\nJambo from Spring Boot! 175 on myboot-558b4f8678-z7f9n\nJambo from Spring Boot! 
176 on myboot-558b4f8678-z7f9n\n----\n\n== Clean up\n\n[#cleanup-live-ready]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete deployment myboot\n----\n\n== Startup Probe\n\nSome applications require an additional startup time on their first initialization.\n\nIt might be tricky to fit this scenario into the liveness/readiness probes as you need to configure them for their normal behaviour to detect abnormalities during the running time and moreover covering the long start up time.\n\n:quick-open-file: myboot-deployment-live-ready-aggressive.yml\n\nFor instance, what if we had an application that might deadlock and we want to catch such issues immediately, we might have liveness and readiness probes that look like in `apps/kubefiles/{quick-open-file}`\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,yaml]\n.{quick-open-file}\n----\ninclude::example$myboot-deployment-live-ready-aggressive.yml[]\n----\n\nThen apply that deployment\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-live-ready-aggressive.yml\n----\n\nAs we'll see from the pod watch, the pods are continually getting restarted, sometimes after it successfully boots up (because kubelet schedules for restart) and this is due to the startup time of SpringBoot.\n\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe pods\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nEvents:\n  Type     Reason     Age                 From               Message\n  ----     ------     ----                ----               -------\n  Normal   Scheduled  96s                 default-scheduler  Successfully assigned myspace/myboot-849ccd6948-8vrfq to devnation\n  Normal   Pulled     92s                 kubelet            Successfully pulled image \"quay.io/rhdevelopers/myboot:v1\" in 3.295180194s\n  Normal   Created    55s (x2 over 
92s)   kubelet            Created container myboot\n  Normal   Started    55s (x2 over 92s)   kubelet            Started container myboot\n  Normal   Pulled     55s                 kubelet            Successfully pulled image \"quay.io/rhdevelopers/myboot:v1\" in 3.289395484s\n  Warning  Unhealthy  52s (x4 over 90s)   kubelet            Liveness probe failed: Get \"http://172.17.0.4:8080/alive\": dial tcp 172.17.0.4:8080: connect: connection refused\n  Normal   Killing    52s (x2 over 88s)   kubelet            Container myboot failed liveness probe, will be restarted\n  Normal   Pulling    22s (x3 over 95s)   kubelet            Pulling image \"quay.io/rhdevelopers/myboot:v1\"\n  Warning  Unhealthy  19s (x10 over 88s)  kubelet            Readiness probe failed: Get \"http://172.17.0.4:8080/health\": dial tcp 172.17.0.4:8080: connect: connection refused\n----\n\n*Startup probes* fix this problem, as once the startup probe has succeeded, the rest of the probes take over, but until the startup probe passes, neither the liveness nor the readiness probes can run. \n\n:quick-open-file: myboot-deployment-startup-live-ready.yml\n\n`{quick-open-file}` is an example of a deployment with just such a probe\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,yaml]\n.{quick-open-file}\n----\ninclude::example$myboot-deployment-startup-live-ready.yml[]\n----\n\nYou'll see the difference is this section\n\n[.console-output]\n[source,yaml]\n----\n        startupProbe:\n          httpGet:\n            path: /alive\n            port: 8080\n          failureThreshold: 6\n          periodSeconds: 5\n          timeoutSeconds: 1\n----\n\nThen apply that deployment\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-startup-live-ready.yml\n----\n\nThe startup probe waits for 30 seconds (`5 * 6`) to startup the application.  
Notice, too, that the delay on the liveness and readiness checks has gone down to 0.\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -w\n----\n\n[.console-output]\n[source.bash]\n----\nNAME                      READY   STATUS    RESTARTS   AGE\nmyboot-579cc5cc47-2bk5p   0/1     Running   0          67s\n----\n\nEventually your curl loop should show the pod running\n\n----\nAloha from Spring Boot! 18 on myboot-849ccd6948-8vrfq\nAloha from Spring Boot! 19 on myboot-849ccd6948-8vrfq\nAloha from Spring Boot! 20 on myboot-849ccd6948-8vrfq\nAloha from Spring Boot! 21 on myboot-849ccd6948-8vrfq\n----\n\nLet's show that the liveness probe has taken over.\nNow pick one of the pods, `exec` into it and shoot it:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl exec -it myboot-558b4f8678-qbrgc -- /bin/bash\n----\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl localhost:8080/shot\n----\n\nAnd you will see it get restarted.\n\n\nDescribe the pod to get the statistics of probes:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe pod myboot-579cc5cc47-2bk5p\n----\n\n[.console-output]\n[source.yaml]\n----\nLimits:\n  cpu:     1\n  memory:  400Mi\nRequests:\n  cpu:        250m\n  memory:     300Mi\nLiveness:     http-get http://:8080/ delay=10s timeout=2s period=5s #success=1 #failure=3\nReadiness:    http-get http://:8080/health delay=10s timeout=1s period=3s #success=1 #failure=3\nStartup:      http-get http://:8080/alive delay=0s timeout=1s period=5s #success=1 #failure=12\nEnvironment:  <none>\nMounts:\n----\n\n== Clean Up\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete deployment myboot\nkubectl delete svc myboot\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/logs.adoc",
    "content": "= Logs\n\nThere are various \"production-ready\" ways to do log gathering and viewing across a Kubernetes/OpenShift cluster. Many folks like some flavor of ELK (ElasticSearch, Logstash, Kibana) or EFK (ElasticSearch, FluentD, Kibana).\n\nThe focus here is on things a developer needs to get access to do in order to help understand the behavior of their application running inside of a pod.\n\nMake sure you have an application (Deployment) running:\n\n[#create-deployment]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: myapp\n  template:\n    metadata:\n      labels:\n        app: myapp\n        env: dev\n    spec:\n      containers:\n      - name: myapp\n        image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\nEOF\n----\n\nMake sure you are running 3 replicas (3 pods/instances of your application):\n\n[#logs-get-replicas]\n[.console-input]\n[source, bash]\n----\nkubectl get deployment my-deployment -o jsonpath='{.status.replicas}{\"\\n\"}'\n----\n\nIf not, scale up to 3:\n\n[#logs-scale-replicas]\n[.console-input]\n[source, bash]\n----\nkubectl scale --replicas=3 deployment/my-deployment\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                             READY   STATUS    RESTARTS   AGE\nmy-deployment-5dc67997c7-5bq4n   1/1     Running   0          34s\nmy-deployment-5dc67997c7-m7z9f   1/1     Running   0          34s\nmy-deployment-5dc67997c7-s4jc6   1/1     Running   0          34s\n----\n\n[#logs-log-deployment]\n[.console-input]\n[source, bash]\n----\nkubectl logs my-deployment-5dc67997c7-m7z9f\n----\n\n[.console-output]\n[source]\n----\n  .   
____          _            __ _ _\n /\\\\ / ___'_ __ _ _(_)_ __  __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\/ _` | \\ \\ \\ \\\n \\\\/  ___)| |_)| | | | | || (_| |  ) ) ) )\n  '  |____| .__|_| |_|_| |_\\__, | / / / /\n =========|_|==============|___/=/_/_/_/\n :: Spring Boot ::        (v1.5.3.RELEASE)\n\n----\n\nYou can follow logs with `-f` parameter:\n\n[#logs-log-deployment-follow]\n[.console-input]\n[source, bash]\n----\nkubectl logs my-deployment-5dc67997c7-m7z9f -f\n----\n\nAnd in another Terminal:\n\n[.console-input]\n[source,bash]\n----\nkubectl exec -it my-deployment-5dc67997c7-m7z9f -- /bin/bash\ncurl localhost:8080\n----\n\n[.console-output]\n[source,bash]\n----\nAloha from my-deployment-5dc67997c7-m7z9f\n----\n\nDeploy a Service for `my-deployment`:\n\n[#create-service]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Service\nmetadata:\n  name: the-service\nspec:\n  selector:\n    app: myapp\n  ports:\n    - protocol: TCP\n      port: 80\n      targetPort: 8080\n  type: LoadBalancer\nEOF\n----\n\nIn another Terminal, loop and curl that service:\n\n:section-k8s: logs\n:service-exposed: the-service\ninclude::partial$env-curl.adoc[]\n\nStart sending the request in a loop:\n\ninclude::partial$loop.adoc[]\n\nShow the logs from all pods appending the `--all-pods` option:\n\n[#all-pods]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl logs -f deployments/my-deployment --all-pods\n----\n\n[.console-output]\n[source,bash]\n----\nmy-deployment-5dc67997c7-5bq4n myapp Aloha from my-deployment-5dc67997c7-5bq4n\nmy-deployment-5dc67997c7-m7z9f myapp Aloha from my-deployment-5dc67997c7-m7z9f\nmy-deployment-5dc67997c7-s4jc6 myapp Aloha from my-deployment-5dc67997c7-s4jc6\nmy-deployment-5dc67997c7-s4jc6 myapp Aloha from my-deployment-5dc67997c7-s4jc6\n----\n\nOther popular tools to show the logs are:\n\n. https://github.com/wercker/stern[stern]\n. 
https://github.com/boz/kail[kail]:\n\n[TIP]\n====\nTo potentially pull the logs from a failing pod use `-p`\n\n[.console-input]\n[source,bash]\n----\nkubectl logs my-deployment-5dc67997c7-s4jc6 -p \n----\n====\n\n== Clean Up\n\n[#clean-up]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete service the-service\nkubectl delete deployment my-deployment\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/pod-rs-deployment.adoc",
    "content": "= Pod, ReplicaSet, Deployment\n\nFirst create a namespace to work in:\n\n[#create-namespace]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create namespace myspace\nkubectl config set-context --current --namespace=myspace\n----\n\n== Pod\n\nCreate a https://kubernetes.io/docs/concepts/configuration/overview/#naked-pods-vs-replicasets-deployments-and-jobs[naked pod]:\n\n[#create-pod]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Pod\nmetadata:\n  name: quarkus-demo\nspec:\n  containers:\n  - name: quarkus-demo\n    image: quay.io/rhdevelopers/quarkus-demo:v1\n    securityContext:\n      seccompProfile:\n        type: RuntimeDefault\nEOF\n----\n\nWatch the pod lifecycle:\n\n[#watch-pod]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -w\n----\n\n[.console-output]\n[source,bash]\n----\nNAME           READY   STATUS              RESTARTS   AGE\nquarkus-demo   0/1     ContainerCreating   0          10s\n----\n\nFrom ContainerCreating to Running with Ready 1/1:\n\n[.console-output]\n[source,bash]\n----\nNAME           READY   STATUS    RESTARTS   AGE\nquarkus-demo   1/1     Running   0          18s\n----\n\nVerify the application in the Pod:\n\n[#verify-pod]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl exec -it quarkus-demo -- /bin/sh\n----\n\nRun the next command.\nNotice that as you are inside the container instance, the hostname is `localhost`.\n\n[#curl-inside-pod]\n[.console-input]\n[source,bash]\n----\ncurl localhost:8080\n----\n\n[.console-output]\n[source,bash]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo:1\nexit\n----\n\nLet's delete the previous Pod:\n\n[#delete-naked-pod]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete pod quarkus-demo\n----\n\n[#watch-all-naked-pod]\n[.console-input]\n[source,bash]\n----\nkubectl get 
pods -w\n----\n\n[.console-output]\n[source,bash]\n----\nNAME           READY   STATUS        RESTARTS   AGE\nquarkus-demo   0/1     Terminating   0          9m35s\n\nNo resources found in myspace namespace.\n----\n\nA Naked Pod disappears forever.\n\n== ReplicaSet\n\nCreate a ReplicaSet:\n\n[#create-replicaset]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: ReplicaSet\nmetadata:\n    name: rs-quarkus-demo\nspec:\n    replicas: 3\n    selector:\n       matchLabels:\n          app: quarkus-demo\n    template:\n       metadata:\n          labels:\n             app: quarkus-demo\n             env: dev\n       spec:\n          containers:\n          - name: quarkus-demo\n            image: quay.io/rhdevelopers/quarkus-demo:v1\nEOF\n----\n\nGet the pods with labels:\n\n[#replicaset-show-labels]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -w --show-labels\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                    READY   STATUS    RESTARTS   AGE   LABELS\nrs-quarkus-demo-jd6jk   1/1     Running   0          58s   app=quarkus-demo,env=dev\nrs-quarkus-demo-mlnng   1/1     Running   0          58s   app=quarkus-demo,env=dev\nrs-quarkus-demo-t26gt   1/1     Running   0          58s   app=quarkus-demo,env=dev\n----\n\n[#get-rs]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get rs\n----\n\n[.console-output]\n[source,bash]\n----\nNAME              DESIRED   CURRENT   READY   AGE\nrs-quarkus-demo   3         3         3       79s\n----\n\nDescribe the RS:\n\n[#describe-rs-quarkus-demo]\n[.console-input]\n[source,bash]\n----\nkubectl describe rs rs-quarkus-demo\n----\n\n[.console-output]\n[source,bash]\n----\nName:         rs-quarkus-demo\nNamespace:    myspace\nSelector:     app=quarkus-demo\nLabels:       <none>\nAnnotations:  kubectl.kubernetes.io/last-applied-configuration:\n                
{\"apiVersion\":\"apps/v1\",\"kind\":\"ReplicaSet\",\"metadata\":{\"annotations\":{},\"name\":\"rs-quarkus-demo\",\"namespace\":\"myspace\"},\"spec\":{\"replicas...\nReplicas:     3 current / 3 desired\nPods Status:  3 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n  Labels:  app=quarkus-demo\n           env=dev\n  Containers:\n   quarkus-demo:\n    Image:        quay.io/rhdevelopers/quarkus-demo:v1\n    Port:         <none>\n    Host Port:    <none>\n    Environment:  <none>\n    Mounts:       <none>\n  Volumes:        <none>\nEvents:\n  Type    Reason            Age   From                   Message\n  ----    ------            ----  ----                   -------\n  Normal  SuccessfulCreate  89s   replicaset-controller  Created pod: rs-quarkus-demo-jd6jk\n  Normal  SuccessfulCreate  89s   replicaset-controller  Created pod: rs-quarkus-demo-t26gt\n  Normal  SuccessfulCreate  89s   replicaset-controller  Created pod: rs-quarkus-demo-mlnng\n----\n\nPods are \"owned\" by the ReplicaSet:\n\n[#rs-owned-ref]\n[.console-input]\n[source,bash]\n----\nkubectl get pod rs-quarkus-demo-mlnng -o jsonpath='{.metadata.ownerReferences[]}'\n----\n\n[.console-output]\n[source,bash]\n----\n{\n  \"apiVersion\": \"apps/v1\",\n  \"blockOwnerDeletion\": true,\n  \"controller\": true,\n  \"kind\": \"ReplicaSet\",\n  \"name\": \"rs-quarkus-demo\",\n  \"uid\": \"1ed3bb94-dfa5-40ef-8f32-fbc9cf265324\"\n}\n----\n\nNow delete a pod, while watching pods:\n\n[#delete-pod-rs]\n[.console-input]\n[source,bash]\n----\nkubectl delete pod rs-quarkus-demo-mlnng\n----\n\nAnd a new pod will spring to life to replace it:\n\n[.console-output]\n[source,bash]\n----\nNAME                    READY   STATUS              RESTARTS   AGE    LABELS\nrs-quarkus-demo-2txwk   0/1     ContainerCreating   0          2s     app=quarkus-demo,env=dev\nrs-quarkus-demo-jd6jk   1/1     Running             0          109s   app=quarkus-demo,env=dev\nrs-quarkus-demo-t26gt   1/1     Running             0          
109s   app=quarkus-demo,env=dev\n----\n\nDelete the ReplicaSet to remove all the associated pods:\n\n[#delete-rs]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete rs rs-quarkus-demo\n----\n\n== Deployment\n\n[#create-deployment]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: quarkus-demo-deployment\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: quarkus-demo\n  template:\n    metadata:\n      labels:\n        app: quarkus-demo\n        env: dev\n    spec:\n      containers:\n      - name: quarkus-demo\n        image: quay.io/rhdevelopers/quarkus-demo:v1\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\nEOF\n----\n\n[#pod-show-labels-dep]\n[.console-input]\n[source,bash]\n----\nkubectl get pods --show-labels\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                                       READY   STATUS    RESTARTS   AGE   LABELS\nquarkus-demo-deployment-5979886fb7-c888m   1/1     Running   0          17s   app=quarkus-demo,env=dev,pod-template-hash=5979886fb7\nquarkus-demo-deployment-5979886fb7-gdtnz   1/1     Running   0          17s   app=quarkus-demo,env=dev,pod-template-hash=5979886fb7\nquarkus-demo-deployment-5979886fb7-grf59   1/1     Running   0          17s   app=quarkus-demo,env=dev,pod-template-hash=5979886fb7\n----\n\n[#exec-inside-pod-labels]\n[.console-input]\n[source,bash]\n----\nkubectl exec -it quarkus-demo-deployment-5979886fb7-c888m -- curl localhost:8080\n----\n\n[.console-output]\n[source,bash]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo-deployment-5979886fb7-c888m:1\n----\n\nIn the next section, you'll learn the concept of `Service`.\nThis is an important element in the Kubernetes ecosystem.\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/resources.adoc",
    "content": "= Resources and Limits\n\nMake sure you are in the correct namespace:\n\n:section-k8s: resource\n:set-namespace: myspace\n\n[TIP]\n====\nYou will need to create the `{set-namespace}` if you haven't already.  Check for the existence of the namespace with\n\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\nkubectl get ns {set-namespace}\n----\n\nIf the response is: \n\n[.console-output]\n[source,bash, subs=\"+attributes\"]\n----\nError from server (NotFound): namespaces \"{set-namespace}\" not found\n----\n\nThen you can create the namespace with: \n\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\nkubectl create ns {set-namespace}\n----\n====\n\ninclude::partial$set-context.adoc[]\n\nMake sure nothing is running in your namespace:\n\n[#no-resources-resource]\n[.console-input]\n[source, bash]\n----\nkubectl get all\n----\n\n[.console-output]\n[source,bash]\n----\nNo resources found in myspace namespace.\n----\n\nFirst deploy an application without any Requests or Limits:\n\n[#no-limits-resource]\n[.console-input]\n[source, bash]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment.yml\n----\n\nDescribe the pod:\n\n:section-k8s: resource\n:label-describe: app=myboot\ninclude::partial$describe.adoc[]\n\nThere are no resource limits configured for the pod.\n\n[.console-output]\n[source,bash]\n----\nName:         myboot-66d7d57687-jzbzj\nNamespace:    myspace\nPriority:     0\nNode:         gcp-5xldg-w-b-rlp45.us-central1-b.c.ocp42project.internal/10.0.32.5\nStart Time:   Sun, 29 Mar 2020 14:24:24 -0400\nLabels:       app=myboot\n              pod-template-hash=66d7d57687\nAnnotations:  k8s.v1.cni.cncf.io/networks-status:\n                [{\n                    \"name\": \"openshift-sdn\",\n                    \"interface\": \"eth0\",\n                    \"ips\": [\n                        \"10.130.2.23\"\n                    ],\n                    \"dns\": {},\n                    \"default-route\": [\n                  
      \"10.130.2.1\"\n                    ]\n                }]\n              openshift.io/scc: restricted\nStatus:       Running\nIP:           10.130.2.23\nIPs:\n  IP:           10.130.2.23\nControlled By:  ReplicaSet/myboot-66d7d57687\nContainers:\n  myboot:\n    Container ID:   cri-o://2edfb0a5a93f375516ee49d33df20bee40c14792b37ec1648dc5205244095a53\n    Image:          quay.io/burrsutter/myboot:v1\n    Image ID:       quay.io/burrsutter/myboot@sha256:cdf39f191f5d322ebe6c04cae218b0ad8f6dbbb8a81e81a88c0fbc6e3c05f860\n    Port:           8080/TCP\n    Host Port:      0/TCP\n    State:          Running\n      Started:      Sun, 29 Mar 2020 14:24:32 -0400\n    Ready:          True\n    Restart Count:  0\n    Environment:    <none>\n    Mounts:\n      /var/run/secrets/kubernetes.io/serviceaccount from default-token-vlzsl (ro)\nConditions:\n  Type              Status\n  Initialized       True\n  Ready             True\n  ContainersReady   True\n  PodScheduled      True\nVolumes:\n  default-token-vlzsl:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  default-token-vlzsl\n    Optional:    false\nQoS Class:       BestEffort\nNode-Selectors:  <none>\nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type    Reason     Age        From                                                                Message\n  ----    ------     ----       ----                                                                -------\n  Normal  Scheduled  <unknown>  default-scheduler                                                   Successfully assigned myspace/myboot-66d7d57687-jzbzj to gcp-5xldg-w-b-rlp45.us-central1-b.c.ocp42project.internal\n  Normal  Pulled     12m        kubelet, gcp-5xldg-w-b-rlp45.us-central1-b.c.ocp42project.internal  Container image \"quay.io/burrsutter/myboot:v1\" already present on machine\n  Normal  Created    12m        kubelet, 
gcp-5xldg-w-b-rlp45.us-central1-b.c.ocp42project.internal  Created container myboot\n  Normal  Started    12m        kubelet, gcp-5xldg-w-b-rlp45.us-central1-b.c.ocp42project.internal  Started container myboot\n----\n\nDelete that deployment:\n\n[#delete-deployment-resource]\n[.console-input]\n[source, bash]\n----\nkubectl delete deployment myboot\n----\n\nCreate a new deployment with resource requests:\n\n[#limits-resource]\n[.console-input]\n[source, bash]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-resources.yml\n----\n\nAnd check the status of the Pod:\n\n[#limits-get-pod-resource]\n[.console-input]\n[source, bash]\n----\nkubectl get pods\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS    RESTARTS   AGE\nmyboot-7b7d754c86-kjwlr   0/1     Pending   0          19s\n----\n\nIf you want to get more information about the error:\n\n[#get-events-resource]\n[.console-input]\n[source, bash]\n----\nkubectl get events --sort-by=.metadata.creationTimestamp\n----\n\n[.console-output]\n[source,bash]\n----\n<unknown>   Warning   FailedScheduling    pod/myboot-7b7d754c86-kjwlr    0/6 nodes are available: 6 Insufficient cpu.\n<unknown>   Warning   FailedScheduling    pod/myboot-7b7d754c86-kjwlr    0/6 nodes are available: 6 Insufficient cpu.\n----\n\nThe \"resource requests\" of the pod specification require that at least one worker node has N cores and X memory available.  
If there is no worker node that meets the requirements, you receive \"PENDING\" and the appropriate notations in the events listing.\n\nYou can also use `kubectl describe` on the pod to find more information about the failure.\n\n:section-k8s: resource-limit\n:label-describe: app=myboot\ninclude::partial$describe.adoc[]\n\nWe should fix the deployment while keeping a history of changes done by `replace`:\n\n[#apply-deployment-sane-limit-resource]\n[.console-input]\n[source, bash]\n----\nkubectl replace -f apps/kubefiles/myboot-deployment-resources-limits.yml\n----\n\nThe above command will replace the Deployment template and instruct the Pod to have container limits.\nDescribe the Pod:\n\n:section-k8s: resource-soft-limit\n:label-describe: app=myboot\ninclude::partial$describe.adoc[]\n\nDeploy the service:\n\n[#apply-service-sane-limit-resource]\n[.console-input]\n[source, bash]\n----\nkubectl apply -f apps/kubefiles/myboot-service.yml\n----\n\nAnd watch your Pods:\n\n:section-k8s: resources\ninclude::partial$watching-pods.adoc[]\n\nIn another Terminal, loop and curl that service:\n\n:section-k8s: resource-soft-limit\n:service-exposed: myboot\ninclude::partial$env-curl.adoc[]\n\nExecute in loop:\n\ninclude::partial$loop.adoc[]\n\nIn yet another terminal window, curl the /sysresources endpoint:\n\n[#sysresources-sane-limit-resource]\n[.console-input]\n[source, bash]\n----\ncurl $IP:$PORT/sysresources\n----\n\nNOTE: The reported memory vs what was set in the resource limits\n\n[#podresources-sane-limit-resource]\n[.console-input]\n[source, bash]\n----\nPODNAME=$(kubectl get pod -l app=myboot -o name)\nkubectl get $PODNAME -o jsonpath='{.spec.containers[*].resources}'\n----\n\n[.console-output]\n[source,bash]\n----\n{\n  \"limits\": {\n    \"cpu\": \"1\",\n    \"memory\": \"400Mi\"\n  },\n  \"requests\": {\n    \"cpu\": \"250m\",\n    \"memory\": \"300Mi\"\n  }\n}\n----\n\nThen `curl` the `/consume` 
endpoint:\n\n[#consume-sane-limit-resource]\n[.console-input]\n[source, bash]\n----\ncurl $IP:$PORT/consume\n----\n\n[.console-output]\n[source,bash]\n----\ncurl: (52) Empty reply from server\n----\n\nAnd you should notice that your loop also fails:\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 1120 on myboot-d78fb6d58-69kl7\ncurl: (56) Recv failure: Connection reset by peer\n----\n\nDescribe the Pod to see the error:\n\n:section-k8s: resource-soft-limit-fail\n:label-describe: app=myboot\ninclude::partial$describe.adoc[]\n\nAnd look for the following part:\n\n[.console-output]\n[source,bash]\n----\n   Last State:     Terminated\n      Reason:       OOMKilled\n      Exit Code:    137\n----\n\n[#terminated-pod-resource]\n[.console-input]\n[source, bash]\n----\nkubectl get $PODNAME -o jsonpath='{.status.containerStatuses[0].lastState.terminated}'\n----\n\n[.console-output]\n[source,bash]\n----\n{\n  \"containerID\": \"cri-o://7b9be70ce4b616d6083d528dee708cea879da967373dad0d396fb999bd3898d3\",\n  \"exitCode\": 137,\n  \"finishedAt\": \"2020-03-29T19:14:56Z\",\n  \"reason\": \"OOMKilled\",\n  \"startedAt\": \"2020-03-29T18:50:15Z\"\n}\n----\n\nYou might even see the STATUS column of the `kubectl get pods -w` reflect the OOMKilled:\n\n[.console-output]\n[source,bash]\n----\nNAME                     READY   STATUS      RESTARTS   AGE\nmyboot-d78fb6d58-69kl7   0/1     OOMKilled   1          30m\n----\n\nAnd you will notice that the RESTARTS column increments with each crash of the Spring Boot Pod.\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/rolling-updates.adoc",
    "content": "= Rolling updates\n\nMake sure you are in the correct namespace\n\n:section-k8s: rolling\n:set-namespace: myspace\ninclude::partial$set-context.adoc[]\n\n[TIP,subs=\"attributes+,+macros\"]\n====\nIf you just came from xref::resources.adoc[the Resources and Limits section, window=_blank] then you should already have the pods and deployments active that you need.  If not, you will need run the following commands to deploy the needed elements into {set-namespace}\n====\n\nDeploy the Spring Boot app if needed:\n\n[#deploy-myboot-rolling]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment-resources-limits.yml\nkubectl apply -f apps/kubefiles/myboot-service.yml\n----\n\n*Terminal 1*: watch the Pods.\n\ninclude::partial$watching-pods.adoc[]\n\n*Terminal 2*: curl loop the service.\n\n:service-exposed: myboot\n\ninclude::partial$env-curl.adoc[]\n\nAnd run loop script:\n\ninclude::partial$loop.adoc[]\n\n\n*Terminal 3* : Run commands.\n\nDescribe (or `kubectl edit`) the Deployment:\n\n:describe-deployment-name: myboot\n:section-k8s: rolling-init\n\ninclude::partial$describe-deployment.adoc[]\n\n// The .no-query-replace tells the course ui to not attempt to replace tokens between % %\n[.no-query-replace]\n[.console-output]\n[source,bash]\n----\n.\n.\n.\nReplicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavailable\nStrategyType:           RollingUpdate\nMinReadySeconds:        0\nRollingUpdateStrategy:  25% max unavailable, 25% max surge\n.\n.\n.\n----\n\n`StrategyType` options include `RollingUpdate` and `Recreate`:\n\nChange the replicas:\n\ninclude::partial$tip_vscode_kube_editor.adoc[]\n\n[#edit-deployment-replicas-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl edit deployment myboot\n----\n\nLook for \"replicas\":\n\n[.console-output]\n[source,yaml]\n----\nspec:\n  progressDeadlineSeconds: 600\n  replicas: 1\n  revisionHistoryLimit: 10\n  selector:\n    
matchLabels:\n      app: myboot\n----\n\nAnd update to \"2\":\n\n[.console-output]\n[source, yaml]\n----\nspec:\n  progressDeadlineSeconds: 600\n  replicas: 2\n  revisionHistoryLimit: 10\n  selector:\n    matchLabels:\n      app: myboot\n----\n\nSave and close your editor and a new pod will come to life:\n\n[#edit-deployment-replicas-get-pod-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl get pods\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                     READY   STATUS    RESTARTS   AGE\nmyboot-d78fb6d58-2fqml   1/1     Running   0          25s\nmyboot-d78fb6d58-ljkjp   1/1     Running   0          3m\n----\n\nChange the image associated with the deployment:\n\n[#edit-deployment-v2-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl edit deployment myboot\n----\n\nFind the image attribute:\n\n[source, yaml]\n----\n    spec:\n      containers:\n      - image: quay.io/rhdevelopers/myboot:v1\n        imagePullPolicy: IfNotPresent\n        name: myboot\n----\n\nand change the image `myboot:v2`:\n\n[source, yaml]\n----\n    spec:\n      containers:\n      - image: quay.io/rhdevelopers/myboot:v2\n        imagePullPolicy: IfNotPresent\n        name: myboot\n----\n\n[#edit-deployment-v2-get-pod-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl get pods\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS              RESTARTS   AGE\nmyboot-7fbc4b97df-4ntmk   1/1     Running             0          9s\nmyboot-7fbc4b97df-qtkzj   0/1     ContainerCreating   0          0s\nmyboot-d78fb6d58-2fqml    1/1     Running             0          3m29s\nmyboot-d78fb6d58-ljkjp    1/1     Terminating         0          8m\n----\n\nAnd the output from terminal 2:\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 211 on myboot-d78fb6d58-2fqml\nAloha from Spring Boot! 212 on myboot-d78fb6d58-2fqml\nBonjour from Spring Boot! 0 on myboot-7fbc4b97df-4ntmk\nBonjour from Spring Boot! 
1 on myboot-7fbc4b97df-4ntmk\n----\n\nCheck the status of the deployment:\n\n[#rollout-v2-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl rollout status deployment myboot\n----\n\n[.console-output]\n[source,bash]\n----\ndeployment \"myboot\" successfully rolled out\n----\n\nNotice that there is a new RS:\n\n[#rs-v2-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl get rs\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                DESIRED   CURRENT   READY   AGE\nmyboot-7fbc4b97df   2         2         2       116s\nmyboot-d78fb6d58    0         0         0       10m\n----\n\nDescribe the Deployment:\n\n\n:describe-deployment-name: myboot\n:section-k8s: rolling\n\ninclude::partial$describe-deployment.adoc[]\n\nAnd check out the Events section:\n\n[.console-output]\n[source,bash]\n----\n...\nEvents:\n  Type    Reason             Age    From                   Message\n  ----    ------             ----   ----                   -------\n  Normal  ScalingReplicaSet  16m    deployment-controller  Scaled up replica set myboot-d78fb6d58 to 1\n  Normal  ScalingReplicaSet  6m15s  deployment-controller  Scaled up replica set myboot-d78fb6d58 to 2\n  Normal  ScalingReplicaSet  2m55s  deployment-controller  Scaled up replica set myboot-7fbc4b97df to 1\n  Normal  ScalingReplicaSet  2m46s  deployment-controller  Scaled down replica set myboot-d78fb6d58 to 1\n  Normal  ScalingReplicaSet  2m46s  deployment-controller  Scaled up replica set myboot-7fbc4b97df to 2\n  Normal  ScalingReplicaSet  2m37s  deployment-controller  Scaled down replica set myboot-d78fb6d58 to 0\n----\n\nYou can list the revisions associated to your deployment by running the following command:\n[#rollout-history]\n[.console-input]\n[source, bash]\n----\nkubectl rollout history deployment/myboot\n----\n\nYou can rollback to v1 using the following command:\n\n[#describe-rollback-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl rollout undo deployment/myboot 
--to-revision=1\n----\n\nAnd it rolls back to Aloha:\n\n[.console-output]\n[source,bash]\n----\nBonjour from Spring Boot! 501 on myboot-7fbc4b97df-qtkzj\nBonjour from Spring Boot! 502 on myboot-7fbc4b97df-qtkzj\nAloha from Spring Boot! 0 on myboot-d78fb6d58-vnlch\n----\n\n[IMPORTANT] \n====\nOn minikube, you may receive errors from curl during the rollover activity.\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 119 on myboot-d78fb6d58-2zp4h\ncurl: (7) Failed to connect to 192.168.99.100 port 31528: Connection refused\n----\n\nThe reason is the missing Liveness and Readiness probes\n\nTry using the Quarkus image instead of the Spring Boot one\n\n[#describe-rollback-quarkus-rolling]\n[.console-input]\n[source, bash]\n----\nkubectl set image deployment/myboot myboot=quay.io/rhdevelopers/quarkus-demo:v1\n----\n\nAnd there should be no errors, Quarkus simply boots up crazy fast\n\n[.console-output]\n[source,bash]\n----\nAloha from Spring Boot! 62 on myboot-d78fb6d58-smb7h\nAloha from Spring Boot! 63 on myboot-d78fb6d58-smb7h\nSupersonic Subatomic Java with Quarkus myboot-5cf696848b-tlt6l:1\nSupersonic Subatomic Java with Quarkus myboot-5cf696848b-tlt6l:2\n----\n====\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/secrets.adoc",
    "content": "= Secrets\ninclude::_attributes.adoc[]\n:watch-terminal: Terminal 2\n\nSecrets are an out of the box way Kubernetes provides to store sensitive data.  Most similar to config maps, these are treated with a bit of extra care under the hood in Kubernetes.\n\nSecrets are meant to give developers a way of specifying common types of sensitive data (basic-auth credentials, image registry credentials, TLS certs, etc) without including it (insecurely) in the code (application or infrastructure) of their containerized application.  A typical generic secret that one will come across are the credentials for accessing a database.\n\nThe heart of any secret is not displayed in plain-text by default.  Instead, secret data is base64 encoded and needs to be decoded to be read.\n\n[WARNING]\n====\nLike most data in the Kubernetes API, secrets are stored within the `etc` distributed data store.  Whilst access to this data is mediated by the cluster's RBAC, it should be noted that Secrets are NOT encrypted at rest within `etcd` in Kubernetes by default.  This can be enabled on generic Kubernetes by following link:https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/[these instructions^].  
OpenShift makes this even easier as documented link:https://docs.openshift.com/container-platform/4.7/security/encrypting-etcd.html[here^]\n====\n\n== Prerequisites\n\nMake sure you are in the correct namespace:\n\n:section-k8s: resource\n:set-namespace: myspace\n\ninclude::partial$namespace-setup-tip.adoc[]\n\ninclude::partial$set-context.adoc[]\n\nMake sure nothing is running in your namespace:\n\n[#no-resources-resource]\n[.console-input]\n[source, bash]\n----\nkubectl get all\n----\n\n[.console-output]\n[source,bash]\n----\nNo resources found in myspace namespace.\n----\n\nDeploy `myboot` service:\n\n[#deploy-myboot-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment.yml\n----\n\nDeploy myboot Service:\n\n[#service-myboot-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-service.yml\n----\n\nIn a separate terminal (hereafter referred to as *{watch-terminal}*) set up a watch on the pods: \n\n:section-k8s: secrets\ninclude::partial$watching-pods-with-nodes.adoc[]\n\nMeanwhile, in the main terminal, send a request:\n\n:service-exposed: myboot\ninclude::partial$env-curl.adoc[]\n\nwhich should give us the by now familiar response\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nAloha from Spring Boot! 1 on myboot-7cbfbd9b89-dl2hv\n----\n\n== Creating Secrets\n\nPreviously, we used a `ConfigMap` to hold a database connection string (`user=MyUserName;password=pass:[*****]`). Instead, let's create a secret to hold this sensitive data.\n\nThe `kubectl` CLI has some support for creating generic (or `opaque`) secrets like the one we would use for a database login. 
\n\n[#create-secret-cli-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create secret generic mysecret --from-literal=user='MyUserName' --from-literal=password='mypassword'\n----\n\n[#get-secret-cli-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get secrets\n----\n\nWhich will now yield output similar to the following\n\n[tabs]\n====\nMinikube::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                  TYPE                                  DATA   AGE\ndefault-token-nxkpw   kubernetes.io/service-account-token   3      5d12h\nmysecret              Opaque                                2      25s\n----\n--\nOpenShift::\n+\n--\n[.console-output]\n[source,bash]\n----\nNAME                       TYPE                                  DATA   AGE\nbuilder-dockercfg-96ml5    kubernetes.io/dockercfg               1      3d6h\nbuilder-token-h5g82        kubernetes.io/service-account-token   4      3d6h\nbuilder-token-vqjqz        kubernetes.io/service-account-token   4      3d6h\ndefault-dockercfg-bsnjr    kubernetes.io/dockercfg               1      3d6h\ndefault-token-bl77s        kubernetes.io/service-account-token   4      3d6h\ndefault-token-vlzsl        kubernetes.io/service-account-token   4      3d6h\ndeployer-dockercfg-k6npn   kubernetes.io/dockercfg               1      3d6h\ndeployer-token-4hb78       kubernetes.io/service-account-token   4      3d6h\ndeployer-token-vvh6r       kubernetes.io/service-account-token   4      3d6h\nmysecret                   Opaque                                2      5s\n----\n--\n====\n\nBecause this is a `Secret` and not a `ConfigMap`, the user & password are not immediately visible:\n\n[#describe-secret-cli-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe secret mysecret\n----\n\n[.console-output]\n[source,bash]\n----\nName:         mysecret\nNamespace:    
myspace\nLabels:       <none>\nAnnotations:  <none>\n\nType:  Opaque\n\nData\n====\npassword:  10 bytes\nuser:      10 bytes\n----\n\n[#get-secret-cli-yaml-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get secret mysecret -o yaml\n----\n\n\n[source,yaml]\n----\napiVersion: v1\ndata:\n  password: bXlwYXNzd29yZA==\n  user: TXlVc2VyTmFtZQ==\nkind: Secret\nmetadata:\n  creationTimestamp: \"2020-03-31T20:19:26Z\"\n  name: mysecret\n  namespace: myspace\n  resourceVersion: \"4944690\"\n  selfLink: /api/v1/namespaces/myspace/secrets/mysecret\n  uid: e8c5f12e-bd71-4d6b-8d8c-7af9ed6439f8\ntype: Opaque\n----\n\nCopy the value of the password field above into the echo command below to prove that it is base64 encoded\n\n[#get-secret-cli-password-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\necho 'bXlwYXNzd29yZA==' | base64 --decode\n----\n\n[.console-output]\n[source,bash]\n----\nmypassword\n----\n\n[TIP]\n====\nIf pressed for time, you can run the following command instead\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nB64_PASSWORD=$(kubectl get secret mysecret -o jsonpath='{.data.password}')\necho \"password:$B64_PASSWORD is decoded as $(echo $B64_PASSWORD | base64 --decode)\"\n----\n\n====\n\nAnd then do the same for the username\n\n[#get-secret-cli-username-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\necho 'TXlVc2VyTmFtZQ==' | base64 --decode\n----\n\n[.console-output]\n[source,bash]\n----\nMyUserName\n----\n\n[TIP]\n====\nIf pressed for time, you can run the following command instead\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nB64_DATA=$(kubectl get secret mysecret -o jsonpath='{.data.user}')\necho \"username:$B64_DATA is decoded as $(echo $B64_DATA | base64 --decode)\"\n----\n\n====\n\n\nOr get them using 
`kubectl`:\n\n[#get-secret-kubectl-password-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get secret mysecret -o jsonpath='{.data.password}' | base64 --decode \n----\n\n== Using Secrets\n\n:quick-open-file: myboot-deployment-configuration-secret.yml\n\nLet's take a look at a deployment, `{quick-open-file}`, that will make use of our newly created secret. \n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,yaml,subs=\"+macros,+attributes\"]\n.{quick-open-file}\n----\ninclude::example$myboot-deployment-configuration-secret.yml[]\n----\n<.> This determines where the pod will find the secret.  It will be in a file in the `/mystuff/secretstuff` directory in the pod\n<.> This defines what `mysecretvolume` should actually mount.  In this case `mysecret`, the secret we just created above.\n\nOne way to allow deployments (pods) to use secrets is to provide them via Volume Mounts:\n\n[source, yaml]\n----\n        volumeMounts:          \n          - name: mysecretvolume\n            mountPath: /mystuff/mysecretvolume\n----\n\nLet's update our deployment to use this volume:\n\n[#replace-myboot-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl replace -f apps/kubefiles/myboot-deployment-configuration-secret.yml\n----\n\n_Once the deployment has been updated_, exec into the newly created Pod:\n\n[#print-secrets-volume-secrets]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nPODNAME=$(kubectl get pod -l app=myboot --field-selector pass:['status.phase!=Terminating'] -o name)\nkubectl exec $PODNAME -- ls -l /mystuff/secretstuff\nkubectl exec $PODNAME -- cat /mystuff/secretstuff/password\n----\n\nResults in:\n\n[.console-output]\n[source,bash]\n----\ntotal 0\nlrwxrwxrwx. 1 root root 15 Jul 19 03:37 password -> ..data/password #<.>\nlrwxrwxrwx. 1 root root 11 Jul 19 03:37 user -> ..data/user\nmypassword #<.>\n----\n<.> Refer back to the secret definition.  
Each field under the `.data` section of the secret has become a file in this directory that represents the mounted secret\n<.> `cat` ing the value of the `password` file gives the value of the `.data.password` field in the `secret` we defined above\n\n[TIP]\n====\nAlternatively, you can just run the following command to rsh into the pod and poke around\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nPODNAME=$(kubectl get pod -l app=myboot --field-selector pass:['status.phase!=Terminating'] -o name)\nkubectl exec -it $PODNAME -- /bin/bash\n----\n====\n\n\nBut how would your application know to look in this directory for credentials?  Whilst it could be hardcoded in the application (or via properties) you could also provide the path via `/mystuff/mysecretvolume` to the pod via an environment variable so the application knows where to look.  \n\n[TIP]\n====\nIt's also possible to expose secrets directly as environment variables, but that's beyond the scope of this tutorial.\n====\n\nFor more information on secrets, see https://kubernetes.io/docs/concepts/configuration/secret/[here]\n\n== Clean Up\n\n[.console-input]\n[source,bash]\n----\nkubectl delete deployment myboot\nkubectl delete service myboot\n----\n\n\n\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/service-magic.adoc",
    "content": "= Service Magic\n\nCreate a Namespace:\n\n[#create-namespace]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl create namespace funstuff\nkubectl config set-context --current --namespace=funstuff\n----\n\n== Deploy mypython\n\n[#deploy-mypython]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mypython-deployment\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mypython\n  template:\n    metadata:\n      labels:\n        app: mypython\n    spec:\n      containers:\n      - name: mypython\n        image: quay.io/rhdevelopers/mypython:v1\n        ports:\n        - containerPort: 8000\nEOF\n----\n\n== Deploy mygo\n\n[#deploy-mygo]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mygo-deployment\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mygo\n  template:\n    metadata:\n      labels:\n        app: mygo\n    spec:\n      containers:\n      - name: mygo\n        image: quay.io/rhdevelopers/mygo:v1\n        ports:\n        - containerPort: 8000\nEOF\n----\n\n== Deploy mynode\n\n[#deploy-mynode]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mynode-deployment\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mynode\n  template:\n    metadata:\n      labels:\n        app: mynode\n    spec:\n      containers:\n      - name: mynode\n        image: quay.io/rhdevelopers/mynode:v1\n        ports:\n        - containerPort: 8000\nEOF\n----\n\n[#labels-service-magic]\n[.console-input]\n[source, bash]\n----\nkubectl get pods -w --show-labels\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                                   READY   STATUS    RESTARTS   AGE  
   LABELS\nmygo-deployment-6d944c5c69-kcvmk       1/1     Running   0          2m11s   app=mygo,pod-template-hash=6d944c5c69\nmynode-deployment-fb5457c5-hhz7h       1/1     Running   0          2m1s    app=mynode,pod-template-hash=fb5457c5\nmypython-deployment-6874f84d85-2kpjl   1/1     Running   0          3m53s   app=mypython,pod-template-hash=6874f84d85\n----\n\n[#deploy-myservice]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Service\nmetadata:\n  name: my-service\n  labels:\n    app: mystuff\nspec:\n  ports:\n  - name: http\n    port: 8000\n  selector:\n    inservice: mypods\n  type: LoadBalancer\nEOF\n----\n\n[#describe-myservice-service-magic]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe service my-service\n----\n\n[#get-endpoints-myservice]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get endpoints\n----\n\n[.console-output]\n[source,bash]\n----\nNAME         ENDPOINTS   AGE\nmy-service   <none>      2m6s\n----\n\nAlternatively, if you extract only the service endpoints with the following command, you'll get no result:\n\n[#get-endpoints1]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get endpoints my-service -o jsonpath='{.subsets[].addresses[*].ip}{\"\\n\"}'\n----\n\nInitialize `IP` and `PORT` environment variables to reach the service:\n\n:section-k8s: servicemagic\n:service-exposed: my-service\ninclude::partial$env-curl.adoc[]\n\nAnd run loop script:\n\ninclude::partial$loop.adoc[]\n\nThe client may experience either *connection refusal* or a *delay* before timing out, depending on the features of the load balancer.\n\n[.console-output]\n[source,bash]\n----\ncurl: (7) Failed to connect to 35.224.233.213 port 8000: Connection refused\ncurl: (7) Failed to connect to 35.224.233.213 port 8000: Connection 
refused\n----\n\n[#label-mypython]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl label pod -l app=mypython inservice=mypods\n----\n\n[.console-output]\n[source,bash]\n----\ncurl: (7) Failed to connect to 35.224.233.213 port 8000: Connection refused\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nPython Hello on mypython-deployment-6874f84d85-2kpjl\n----\n\n[#label-mynode]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl label pod -l app=mynode inservice=mypods\n----\n\n[.console-output]\n[source,bash]\n----\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nNode Hello on mynode-deployment-fb5457c5-hhz7h 0\nNode Hello on mynode-deployment-fb5457c5-hhz7h 1\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nPython Hello on mypython-deployment-6874f84d85-2kpjl\n----\n\n[#label-mygo]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl label pod -l app=mygo inservice=mypods\n----\n\n[.console-output]\n[source,bash]\n----\nNode Hello on mynode-deployment-fb5457c5-hhz7h 59\nNode Hello on mynode-deployment-fb5457c5-hhz7h 60\nGo Hello on mygo-deployment-6d944c5c69-kcvmk\nPython Hello on mypython-deployment-6874f84d85-2kpjl\nPython Hello on mypython-deployment-6874f84d85-2kpjl\n----\n\n[#get-endpoints2]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get endpoints my-service -o jsonpath='{.subsets[].addresses[*].ip}{\"\\n\"}'\n----\n\n[.console-output]\n[source,bash]\n----\n10.130.2.43 10.130.2.44 10.130.2.45\n----\n\nSee the Pod IPs:\n\n[#pod-ips]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pods -o wide\n----\n\nRemove `mypython` Pod from the Service:\n\n[#remove-label]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl label 
pod -l app=mypython inservice-\n----\n\n[#get-endpoints3]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get endpoints my-service -o jsonpath='{.subsets[].addresses[*].ip}{\"\\n\"}'\n----\n\n[.console-output]\n[source,bash]\n----\n10.130.2.44 10.130.2.45\n----\n\n== Clean Up\n\n[#clean-up]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete namespace funstuff\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/service.adoc",
    "content": "= Service\n\nNOTE: This follows the creation of the Deployment in the previous chapter\n\nMake sure you are in the correct namespace:\n\n:section-k8s: services\n:set-namespace: myspace\n\ninclude::partial$set-context.adoc[]\n\nMake sure you have Deployment:\n\n[#have-deployment-service]\n[.console-input]\n[source,bash]\n----\nkubectl get deployments\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY   UP-TO-DATE   AVAILABLE   AGE\nquarkus-demo-deployment   3/3     3            3           8m33s\n----\n\nMake sure you have RS:\n\n[#have-rs-service]\n[.console-input]\n[source,bash]\n----\nkubectl get rs\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                                 DESIRED   CURRENT   READY   AGE\nquarkus-demo-deployment-5979886fb7   3         3         3       8m56s\n----\n\nMake sure you have Pods:\n\n[#have-pods-service]\n[.console-input]\n[source,bash]\n----\nkubectl get pods\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                                       READY   STATUS    RESTARTS   AGE\nquarkus-demo-deployment-5979886fb7-c888m   1/1     Running   0          9m17s\nquarkus-demo-deployment-5979886fb7-gdtnz   1/1     Running   0          9m17s\nquarkus-demo-deployment-5979886fb7-grf59   1/1     Running   0          9m17s\n----\n\nCreate a Service\n[#create-service]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Service\nmetadata:\n  name: the-service\nspec:\n  selector:\n    app: quarkus-demo\n  ports:\n    - protocol: TCP\n      port: 80\n      targetPort: 8080\n  type: LoadBalancer\nEOF\n----\n\n:section-k8s: services\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get services -w\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME          TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE\nthe-service   LoadBalancer   172.30.103.41   
<pending>     80:31974/TCP     4s\n----\n\nWait until you see an external IP assigned.\n\nNOTE: On Minikube without an Ingress controller, <pending> will not become a real external IP.  https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/[Optional: Setup Minikube Ingress]\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME    TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)          AGE\nmyapp   LoadBalancer   172.30.103.41   34.71.122.153   8080:31974/TCP   44s\n----\n\n:section-k8s: services\n:service-exposed: the-service\ninclude::partial$env-curl.adoc[]\n\nResults:\n\n[.console-output]\n[source,bash]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo-deployment-5979886fb7-grf59:1\n----\n\nNOTE: \"5979886fb7-grf59\" is part of the unique id for the pod. The `.java` code uses `System.getenv().getOrDefault(\"HOSTNAME\", \"unknown\");`\n\n== Ingress or Route\n\n*Kubernetes Ingress* and *OpenShift Route* are functionally similar, resources used to expose applications externally.\n\nThe `Route` object was developed by Red Hat before the Kubernetes Ingress API was fully mature. It essentially bundles the traffic rule definition and its implementation (via the built-in HAProxy Router) into a single, opinionated feature.\n\nDepending on your underlying platform, continue with the relevant path:\n\n. <<#_kubernetes_ingress,Kubernetes Ingress>>\n. 
<<#_openshift_route,OpenShift Route>>\n\n=== Kubernetes Ingress\n\n[#create-ingress]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncat <<EOF | kubectl apply -f -\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: myingress\nspec:\n  rules:\n  - host: stuff-myspace.apps.gcp.burrsutter.dev\n    http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: the-service\n            port:\n              number: 80\nEOF          \n----\n\n[#curl-services-ingress]\n[.console-input]\n[source, bash]\n----\ncurl stuff-myspace.apps.gcp.burrsutter.dev\n----\n\n[.console-output]\n[source,bash]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo-deployment-5979886fb7-gdtnz:2\n----\n\nIn case of using Minikube follow: https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/\n\nRemove the ingress:\n\n[#delete-ingress]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete ingress myingress\n----\n\n=== OpenShift Route\n\n[#expose-service]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\noc expose service the-service\noc get routes\n----\n\n[.console-output]\n[source,bash]\n----\nNAME          HOST/PORT                                     PATH   SERVICES      PORT   TERMINATION   WILDCARD\nthe-service   the-service-myspace.apps.gcp.burrsutter.dev          the-service   8080                 None\n----\n\nThen make a request to the service:\n\n[#curl-services-route]\n[.console-input]\n[source, bash]\n----\ncurl the-service-myspace.apps.gcp.burrsutter.dev\n----\n\n[.console-output]\n[source,bash]\n----\nSupersonic Subatomic Java with Quarkus quarkus-demo-deployment-5979886fb7-gdtnz:3\n----\n\n=== Use jsonpath to pull out the data elements you need for scripting\n\nThe following command dumps the Kubernetes object to a file in JSON format:\n\n[.console-input]\n[source,bash]\n----\noc get route the-service -o json 
> myroutes.json\n----\n\nIt's also possible to extract specific information from the JSON using JSONPath syntax.\n\n[#route-jq]\n[.console-input]\n[source, bash]\n----\noc get route the-service -o jsonpath=\"{.spec.host}\"\n----\n\n[.console-output]\n[source, bash]\n----\nthe-service-myspace.apps.gcp.burrsutter.dev\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/statefulset.adoc",
    "content": "= StatefulSets\ninclude::_attributes.adoc[]\n:watch-terminal: Terminal 2\n\nA `StatefulSet` provides a unique identity to the Pods that they manage.\n`StatefulSet` s are particularly useful when your application requires a unique network identifier or persistent storage across Pod (re)scheduling or when your application needs some guarantee about the ordering of deployment and scaling.\n\nOne of the most typical examples of using `StatefulSet` s is when one needs to deploy primary/secondary servers (i.e database cluster) where you need to know beforehand the hostname of each of the servers to start the cluster.\nAlso, when you scale up and down you want to do it in a specified order (i.e you want to start the primary node first and then the secondary node).\n\n[IMPORTANT]\n====\n`StatefulSet` requires a Kubernetes _Headless Service_ instead of a standard Kubernetes _service_ in order for it to be accessed.  We will discuss this more below\n====\n\n== Preparation\n\n=== Namespace Setup\n\nMake sure you are in the correct namespace:\n\n:section-k8s: stateful\n:set-namespace: myspace\n\ninclude::partial$namespace-setup-tip.adoc[]\n\ninclude::partial$set-context.adoc[]\n\n=== Watch Terminal\n\nTo be able to observe what's going on, let's open another terminal (*{watch-terminal}*) and `watch` what happens as we run our different jobs\n\n:section-k8s: stateful\n\ninclude::partial$watching-pods-with-nodes.adoc[]\n\n=== Multi-node (minikube)\n\nIf your cluster is running multiple nodes and you need the stateful service to be assigned to a specific node so that you can connect to the service externally, replace `NODE` with the name of the node you don't want to run the stateful service on\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNODE=devnation-02 #<.>\nkubectl taint node pass:[${NODE}] app=quarkus-statefulset:NoExecute\n----\n<.> Replace this and/or repeat for all the nodes in your cluster that you don't want the stateful set to 
be assigned to.  See also xref:taints-affinity.adoc[Taints and Affinity section, window=_blank]\n\n== StatefulSet\n\nStatefulSet is created by using the Kubernetes `StatefulSet` resource:\n\n[source, yaml]\n----\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: quarkus-statefulset\n  labels:\n    app: quarkus-statefulset\nspec:\n  serviceName: \"quarkus\" # <.>\n  replicas: 2\n  selector:\n    matchLabels:\n      app: quarkus-statefulset\n  template:\n    metadata:\n      labels:\n        app: quarkus-statefulset\n    spec:\n      containers:\n      - name: quarkus-statefulset\n        image: quay.io/rhdevelopers/quarkus-demo:v1\n        ports:\n        - containerPort: 8080\n          name: web\n----\n<.> `serviceName` is the name of the (headless) service that governs this `StatefulSet`. This service must exist before the StatefulSet, and is responsible for the network identity of the set\n\n[#hostname-formula]\nWe can predict the hostname for any member pod of a `StatefulSet` by using the following \"formula\":\n\n****\n`StatefulSet.name` + `-` + \"ordinal index\" \n****\n\nThe \"ordinal index\" is a number starting from `0` for the first pod created by the `StatefulSet` and is incremented by one for each additional replica pod.  So in this instance, we would expect the first pod of the `StatefulSet` above to have the hostname:\n\n****\n`quarkus-statefulset-0`\n****\n\nFinally, as mentioned above, to be able to route traffic to the pods of our StatefulSet, we also need to create a *headless service*:\n\n[source, yaml,subs=\"+quotes\"]\n----\napiVersion: v1\nkind: Service\nmetadata:\n  name: #quarkus# #<.>\n  labels:\n    app: quarkus-statefulset\nspec:\n  ports:\n  - port: 8080\n    name: web\n  clusterIP: None #<.>\n  selector:\n    app: quarkus-statefulset\n----\n<.> Notice that this matches the `serviceName` field of the `StatefulSet`.  
This must match to create the dns entry\n<.> Setting `clusterIP` to `None` is what makes the service \"headless\".\n\nApply the following `.yaml` to the cluster to create the `StatefulSet` and the corresponding headless service we looked at above:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/quarkus-statefulset.yaml\n----\n\nYou should then see the following in the watch terminal\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                     READY   STATUS    RESTARTS   AGE\n#quarkus-statefulset-0#   1/1     Running   0          12s\n----\n--\n====\n\nNotice that the Pod name is the `serviceName` with a `-0`, as it is the first (`0` th if you will) instance.  This is as we explained <<hostname-formula,above>>\n\nNow let's take a look at the stateful set itself\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get statefulsets\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                  READY   AGE\nquarkus-statefulset   1/1     109s\n----\n\nAs with `deployments` we can scale `statefulsets`\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl scale sts \\#<.>\n  quarkus-statefulset --replicas=3\n----\n<.> `sts` is the shortname of the `statefulset` api-resource\n\nThen in the watch terminal see\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                    READY   STATUS    RESTARTS   AGE\nquarkus-statefulset-0   1/1     Running   0          95s\n#quarkus-statefulset-1#   1/1     Running   0          2s\n#quarkus-statefulset-2#   1/1     Running   0          1s\n----\n--\n====\n\nNotice that the name of the Pods continues to use <<hostname-formula,the same nomenclature that we called out above>>\n\nAlso, if you check the order of events in the Kubernetes cluster, you'll notice that the Pod name ending with `-1` is created 
*before* those with higher ordinal index (e.g. with suffix of `-2`).\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get events --sort-by=.metadata.creationTimestamp\n----\n\n[.console-output]\n[source,bash]\n----\n4m4s        Normal   SuccessfulCreate          statefulset/quarkus-statefulset   create Pod quarkus-statefulset-1 in StatefulSet quarkus-statefulset successful\n4m3s        Normal   Pulled                    pod/quarkus-statefulset-1         Container image \"quay.io/rhdevelopers/quarkus-demo:v1\" already present on machine\n4m3s        Normal   Scheduled                 pod/quarkus-statefulset-2         Successfully assigned default/quarkus-statefulset-2 to kube\n4m3s        Normal   Created                   pod/quarkus-statefulset-1         Created container quarkus-statefulset\n4m3s        Normal   Started                   pod/quarkus-statefulset-1         Started container quarkus-statefulset\n4m3s        Normal   SuccessfulCreate          statefulset/quarkus-statefulset   create Pod quarkus-statefulset-2 in StatefulSet quarkus-statefulset successful\n4m2s        Normal   Pulled                    pod/quarkus-statefulset-2         Container image \"quay.io/rhdevelopers/quarkus-demo:v1\" already present on machine\n4m2s        Normal   Created                   pod/quarkus-statefulset-2         Created container quarkus-statefulset\n4m2s        Normal   Started                   pod/quarkus-statefulset-2         Started container quarkus-statefulset\n----\n\n=== Stable Network Identities\n\nThe reason we created the *headless service* previously was to ensure that the pods of our stateful set can be found _within_ the cluster (see <<Exposing StatefulSets,Exposing StatefulSets>> for reaching services from outside the cluster).  
\n\nAs each Pod is created, it gets a matching DNS subdomain, taking the form: `$(podname).$(governing service domain)`, where the governing service is defined by the `serviceName` field on the StatefulSetfootnote:[See also the official Kubernetes documentation link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id[here]]\n\nWe can test this by creating a pod within the cluster and doing an `nslookup` from within the cluster.  Run the following command to create a pod in the namespace in which we can run cluster local `nslookup` queries\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl run -it --restart=Never --rm --image busybox:1.28 dns-test \n----\n\nFrom within the container, run the following command to see if we can find a pod of our StatefulSet\n\n[tabs]\n====\nContainer::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nnslookup quarkus-statefulset-0.quarkus\n----\n\nThis should yield the following output (though your reported IP address will vary)\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nServer:    10.96.0.10\nAddress 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local\n\nName:      quarkus-statefulset-0.quarkus\nAddress 1: 172.17.0.3 #quarkus-statefulset-0.quarkus.myspace.svc.cluster.local# #<.>\n----\n<.> Notice that the full address is `$(podname).$(governing service domain).$(namespace)`.svc.cluster.local\n\nYou can now exit the pod (causing it to be cleaned up) by issuing the following command:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nexit\n----\n--\n====\n\nSo with the help of a headless service we can find any pod of the StatefulSet by using its internal DNS name as formulated by the StatefulSet and the headless service.\n\n== Exposing StatefulSets\n\nGiven that our stateful set needed to use a headless service, you'll notice that no external IP is assigned that we can use to access our 
pods from _outside_ the cluster\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe svc quarkus-statefulset\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nName:              quarkus-statefulset\nNamespace:         myspace\nLabels:            app=quarkus-statefulset\nAnnotations:       <none>\nSelector:          app=quarkus-statefulset\nType:              ClusterIP\nIP Family Policy:  SingleStack\nIP Families:       IPv4\n#IP:                None#\n#IPs:               None#\nPort:              web  8080/TCP\nTargetPort:        8080/TCP\nEndpoints:         172.17.0.3:8080,172.17.0.4:8080,172.17.0.5:8080\nSession Affinity:  None\nEvents:            <none>\n----\n\nInstead, only (internal) endpoints are assigned\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe endpoints quarkus-statefulset\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nName:         quarkus-statefulset\nNamespace:    myspace\nLabels:       app=quarkus-statefulset\n              service.kubernetes.io/headless=\nAnnotations:  endpoints.kubernetes.io/last-change-trigger-time: 2021-07-20T04:45:21Z\nSubsets:\n  Addresses:          172.17.0.3,172.17.0.4,172.17.0.5\n  NotReadyAddresses:  <none>\n  Ports:\n    Name  Port  Protocol\n    ----  ----  --------\n    web   8080  TCP\n\nEvents:  <none>\n----\n\nThis kind of makes sense since the whole point of using `StatefulSets` is so that we can reference a specific pod by a predictable name instead of having them abstracted away by a normal (non-headless) `Service`.  
To assist with our ability to access pods by name, kubernetes exposes a label on all `StatefulSet` pods that we can use as a selector to our service\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe pod quarkus-statefulset-2\n----\n\nAnd the abbreviated output shows our label (highlighted)\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nName:         quarkus-statefulset-2\nNamespace:    myspace\nPriority:     0\nNode:         devnation/192.168.49.2\nStart Time:   Tue, 20 Jul 2021 04:45:04 +0000\nLabels:       app=quarkus-statefulset\n              controller-revision-hash=quarkus-statefulset-6bf5d59699\n              #statefulset.kubernetes.io/pod-name=quarkus-statefulset-2#\nAnnotations:  <none>\n----\n\n:quick-open-file: quarkus-statefulset-external-svc.yaml\n\nWe can use this label as a selector for a service that targets this specific pod.  Take a look at `{quick-open-file}`: \n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source,yaml,subs=\"+macros,+attributes\"]\n.{quick-open-file}\n----\ninclude::example$quarkus-statefulset-external-svc.yaml[]\n----\n<.> Indicate that this service should be exposed via LoadBalancer\n<.> Prevent excessive hops by routing traffic directly to the node\n<.> A selector that leverages the label provided automatically by the Kubernetes StatefulSet functionality\n\nHaving reviewed the service we can now create it:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/quarkus-statefulset-external-svc.yaml\n----\n\nMeanwhile, in the main terminal, send a request:\n\n:service-exposed: quarkus-statefulset-2\ninclude::partial$env-curl.adoc[]\n\nYou should receive the following back\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nSupersonic Subatomic Java with Quarkus quarkus-statefulset-2:1 #<.>\n----\n<.> Notice the hostname of `quarkus-statefulset-2`.  
This is part of why we used stateful sets in the first place, so that pods would get predictable hostnames\n\n== Scale Down and Cleanup\n\nFinally, if we scale down to two instances, the one that is destroyed is not randomly chosen, but the one started later (`quarkus-statefulset-2`).\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl scale sts quarkus-statefulset --replicas=2\n----\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                    READY   STATUS        RESTARTS   AGE\nquarkus-statefulset-0   1/1     Running       0          9m22s\nquarkus-statefulset-1   1/1     Running       0          7m49s\n#quarkus-statefulset-2   0/1     Terminating   0          7m48s#\n----\n--\n====\n\nBeware when using stateful sets and services that this could break things.  Remember that the service we created above referenced that exact pod in the stateful set.  If you try to reach it now\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl pass:[${IP}:${PORT}]\n----\n\nYou'll get an error (perhaps like this one)\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl: (7) Failed to connect to 192.168.86.58 port 31834: Connection refused\n----\n\n=== Clean Up\n\nYou've now reached the end of this section.  You can clean up all aspects of the statefulset by deleting the yaml that spawned it (as well as the external service)\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/quarkus-statefulset.yaml\nkubectl delete -f apps/kubefiles/quarkus-statefulset-external-svc.yaml\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/taints-affinity.adoc",
    "content": "= Taints and Affinity\ninclude::_attributes.adoc[]\n:watch-terminal: Terminal 2\n\nSo far, when we deployed any Pod in the Kubernetes cluster, it was run on any node that met the requirements (ie memory requirements, CPU requirements, ...)\n\nHowever, in Kubernetes there are two concepts that allow you to further configure the scheduler, so that Pods are assigned to Nodes following some business criteria.\n\n== Preparation\n\n=== Minikube Multinode\n\ninclude::https://raw.githubusercontent.com/redhat-developer-demos/rhd-tutorial-common/master/minikube-multinode.adoc[]\n\n=== Watch Nodes\n\nTo be able to observe what's going on, let's open another terminal (*{watch-terminal}*) and `watch` what happens to the pods as we change taints on the nodes.\n\n:section-k8s: taints\ninclude::partial$watching-pods-with-nodes.adoc[]\n\n== Taints\n\nA Taint is applied to a Kubernetes Node that signals the scheduler to avoid or not schedule certain Pods.\n\nA Toleration is applied to a Pod definition and provides an exception to the taint.\n\nLet's describe the current nodes, in this case as an OpenShift cluster is used, you can see several nodes:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe nodes | egrep \"Name:|Taints:\"\n----\n\n[.console-output]\n[source,bash]\n----\nName:               ip-10-0-136-107.eu-central-1.compute.internal\nTaints:             node-role.kubernetes.io/master:NoSchedule\nName:               ip-10-0-140-186.eu-central-1.compute.internal\nTaints:             <none>\nName:               ip-10-0-141-128.eu-central-1.compute.internal\nTaints:             <none>\nName:               ip-10-0-146-109.eu-central-1.compute.internal\nTaints:             <none>\nName:               ip-10-0-150-226.eu-central-1.compute.internal\nTaints:             <none>\n----\n\n[NOTE]\n====\nNotice that in this case, the `master` node contains a taint which blocks your application Pods from being scheduled 
there.\n====\n\nLet's add a taint to all nodes:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl taint nodes --all=true color=blue:NoSchedule\n----\n\n[.console-output]\n[source,bash]\n----\nnode/ip-10-0-136-107.eu-central-1.compute.internal tainted\nnode/ip-10-0-140-186.eu-central-1.compute.internal tainted\nnode/ip-10-0-141-128.eu-central-1.compute.internal tainted\nnode/ip-10-0-146-109.eu-central-1.compute.internal tainted\nnode/ip-10-0-150-226.eu-central-1.compute.internal tainted\nnode/ip-10-0-155-122.eu-central-1.compute.internal tainted\nnode/ip-10-0-162-206.eu-central-1.compute.internal tainted\nnode/ip-10-0-168-102.eu-central-1.compute.internal tainted\nnode/ip-10-0-175-64.eu-central-1.compute.internal tainted\n----\n\nThe color=blue is simply a key=value pair to identify the taint and NoSchedule is the specific effect for pods that can't \"tolerate\" the taint.  In other words, if a pod does not tolerate \"color=blue\" then the effect will be \"NoSchedule\"\n\nSo let's try this out.  
From the main terminal, we'll deploy a new pod that doesn't have any particular tolerations:\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment.yml\n----\n--\n====\n\nYou'll see the output in the other terminal change\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                      READY   STATUS    AGE     NODE\nmyboot-7cbfbd9b89-hqx6h   0/1     #Pending#   4m12s   devnation\n----\n--\n====\n\nThe pod will remain in `Pending` status as it has no schedulable Node available.\n\nWe can get more insight into this by entering the following\n\n[tabs]\n====\nTerminal 1 - Minikube::\n+\n--\n// include untagged regions and any regions tagged with minikube\n// See: https://docs.asciidoctor.org/asciidoc/latest/directives/include-tagged-regions/#tagging-regions\ninclude::partial$taint-remove-taint.adoc[tags=**;!*;minikube]\n\n--\nTerminal 1 - OpenShift::\n+\n--\n// Include all untagged regions and any regions tagged with openshift\n// See: https://docs.asciidoctor.org/asciidoc/latest/directives/include-tagged-regions/#tagging-regions\ninclude::partial$taint-remove-taint.adoc[tags=**;!*;openshift]\n\n--\n====\n\nNow in *{watch-terminal}* you should see the Pending pod scheduled to the newly untainted node.  \n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                      READY   STATUS              AGE       NODE\nmyboot-7cbfbd9b89-hqx6h   0/1     #ContainerCreating#   20m   #devnation-m02#\n----\n--\n====\n\nFinally, let's take a quick look at the taint status on all the nodes.  
\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe nodes | egrep \"Name:|Taints:\"\n----\n\n[.console-output]\n[source,bash]\n----\nName:               ip-10-0-136-107.eu-central-1.compute.internal\nTaints:             node-role.kubernetes.io/master:NoSchedule\nName:               ip-10-0-140-186.eu-central-1.compute.internal\nTaints:             <none>\nName:               ip-10-0-141-128.eu-central-1.compute.internal\nTaints:             color=blue:NoSchedule\nName:               ip-10-0-146-109.eu-central-1.compute.internal\nTaints:             color=blue:NoSchedule\n----\n\n--\n====\n\n=== Restore Taint\n\nAdd the taint back to the node (or in this case all nodes): \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl taint nodes --all=true color=blue:NoSchedule --overwrite\n----\n\n[TIP]\n====\nSetting the taint on all nodes is a bit sloppy.  If you'd like you can get the same effect a bit more elegantly by setting the taint only on the node from which it was removed.  
For example:\n\n----\nkubectl taint node ip-10-0-140-186.eu-central-1.compute.internal color=blue:NoSchedule\n----\n====\n\nTake a look and notice that the pod is still running despite the change in taint (this is due to scheduling being a one time activity in the lifecycle of a pod)\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nNAME                      READY   STATUS    AGE   NODE\nmyboot-7cbfbd9b89-bzhxw   1/1     #Running#   18m   devnation-m02\n----\n\n--\n====\n\n\n=== Clean Up\n\nUndeploy the myboot deployment and add again the taint to the node:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/myboot-deployment.yml\n----\n\n== Tolerations\n\nLet's create a Pod but containing a toleration, so it can be scheduled to a tainted node.\n\n[source, yaml]\n----\nspec:\n  tolerations:\n  - key: \"color\"\n    operator: \"Equal\"\n    value: \"blue\"\n    effect: \"NoSchedule\"\n  containers:\n  - name: myboot\n    image: quay.io/rhdevelopers/myboot:v1\n----\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-toleration.yaml\n----\n--\n====\n\nAnd then we should see before too long in our watch window our pod get scheduled and advance to the run state\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                      READY   STATUS    AGE     NODE\nmyboot-84b457458b-mbf9r   1/1     #Running#   3m18s   devnation-m02\n----\n--\n====\n\nNow, although all nodes contain a taint, the Pod is scheduled and run as we defined a toleration against the color=blue taint.\n\n=== Clean Up\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/myboot-toleration.yaml\n----\n\n== `NoExecute` Taint\n\nSo far, you've seen the `NoSchedule` taint effect which means 
that newly created Pods will not be scheduled there unless they have an overriding toleration.\nBut notice that if we add this taint to a node that already has running/scheduled Pods, this taint will not terminate them.\n\nLet's change that by using the `NoExecute` effect. \n\nFirst of all, let's remove all previous taints.\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl taint nodes --all=true color=blue:NoSchedule-\n----\n--\n====\n\n\nThen deploy another instance of myboot (with no Tolerations):\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment.yml\n----\n--\n====\n\nWe should see the following in the watch\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS    AGE   NODE\nmyboot-7cbfbd9b89-wpddg   1/1     Running   47s   devnation-m02\n----\n\n--\n====\n\nNow let's find the node the pod is running on\n\n[tabs]\n====\nTerminal 1::\n+\n--\ninclude::partial$find_node_for_pod.adoc[]\n\n[.console-output]\n[source,bash]\n----\n\"ip-10-0-146-109.eu-central-1.compute.internal\"\n----\n--\n====\n\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl taint node pass:[${NODE}] color=blue:NoExecute\n----\n--\n====\n\nAs soon as we do this, we should be able to watch this \"rescheduling\" occur in the {watch-terminal} watch\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                      READY   STATUS              AGE   NODE\nmyboot-7cbfbd9b89-5t24z   0/1     #ContainerCreating#   16s   devnation\nmyboot-7cbfbd9b89-wpddg   1/1     #Terminating#         65m   devnation-m02\n----\n\n--\n====\n\n[NOTE]\n====\nIf you have more nodes available then the Pod is terminated and deployed onto another node, if it is 
not the case, then the Pod will remain in `Pending` status.\n====\n\n=== Clean Up\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/myboot-deployment.yml\n----\n--\n====\n\nAnd remove the NoExecute taint \n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl taint node pass:[${NODE}] color=blue:NoExecute-\n----\n--\n====\n\n== Affinity & Anti-Affinity\n\nThere is another way of changing where Pods are scheduled using Node/Pod Affinity and Anti-affinity.\nYou can create rules that not only ban where Pods can run but also to favor where they should be run.\n\nIn addition to creating affinities between Pods and Nodes, you can also create affinities between Pods.  You can decide that a group of Pods should be always be deployed together on the same node(s).\nReasons such as significant network communication between Pods and you want to avoid external network calls or perhaps shared storage devices.\n\n=== Node Affinity\n\n:quick-open-file: myboot-node-affinity.yml\n\nLet's deploy a new pod with a node affinity.  
Take a look at `{quick-open-file}` (relevant section shown below)\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\nspec:\n  affinity:\n    nodeAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution: #<.>\n        nodeSelectorTerms:\n        - matchExpressions:\n          - key: color\n            operator: In\n            values:\n            - blue #<.>\n  containers:\n  - name: myboot\n    image: quay.io/rhdevelopers/myboot:v1\n----\n<.> This key highlights that what follows must be used during scheduling but is not a factor once a pod is executing\n<.> The `matchExpressions` is saying this pod has affinity for any node with a `color` in the value set `blue`\n\nNow let's deploy this\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-node-affinity.yml\n----\n--\n====\n\nAnd we'll see in our watch window the pod in a pending state\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nNAME                      READY   STATUS    AGE   NODE\nmyboot-546d4d9b45-7vgfc   0/1     #Pending#   6s    <none>\n----\n--\n====\n\nLet's create a *label* on a node matching the affinity expression:\n\n[tabs]\n====\nTerminal 1 - Minikube::\n+\n--\ninclude::partial$affinity_label.adoc[tags=**;!*;minikube]\n--\nTerminal 1 - OpenShift::\n+\n--\ninclude::partial$affinity_label.adoc[tags=**;!*;openshift]\n--\n====\n\nAnd then in the watch window the output should change to:\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nNAME                      READY   STATUS              AGE   NODE\nmyboot-546d4d9b45-7vgfc   0/1     #ContainerCreating#   15m   devnation-m02\n----\n--\n====\n\n\nLet's delete the label from the node that the pod is running on\n\n[tabs]\n====\nTerminal 1::\n+\n--\nFirst find the 
node the pod is running on\n\ninclude::partial$find_node_for_pod.adoc[]\n\nand then remove the color label from it\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl label nodes pass:[${NODE}] color-\n----\n--\n====\n\nAnd notice that the watch output is *unchanged* and if running, the pod will continue to run\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash]\n----\nNAME                      READY   STATUS    AGE   NODE\nmyboot-546d4d9b45-7vgfc   1/1     Running   22m   devnation-m02\n----\n--\n====\n\nSince we used the `requiredDuringSchedulingIgnoredDuringExecution` in the deployment spec for our pod, we got our affinity to work like taints (in the previous section) worked, namely, that the rule is set during the scheduling phase but ignored after that (i.e. once executing).  Therefore the Pod is not removed in our case.\n\nThis is an example of a _hard_ rule:\n\n.Hard Rule\n****\nIf the Kubernetes scheduler does not find any node with the required label then the Pod remains in _Pending_ state.\n****\n\nThere is also a way to create a _soft_ rule:\n\n.Soft Rule\n****\nThe Kubernetes scheduler attempts to match the rules if it can.  However, if it can't then the Pod is scheduled to any node.  \n****\n\nConsider the example below:\n\n[.console-output]\n[source,yaml,subs=\"+macros,+attributes,+quotes\"]\n----\nspec:\n  affinity:\n    nodeAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution: #<.>\n      - weight: 1\n        preference:\n          matchExpressions:\n          - key: color\n            operator: In\n            values:\n            - blue\n----\n<.> You can see the use of the word _preferred_ vs _required_.\n\n==== Clean Up\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/myboot-node-affinity.yml\n----\n\n=== Pod Affinity/Anti-Affinity\n\n:quick-open-file: myboot-pod-affinity.yml\n\nLet's deploy a new pod with a Pod Affinity.  
See this relevant part of `{quick-open-file}`.  \n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\nspec:\n  affinity:\n    podAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution:\n      - topologyKey: kubernetes.io/hostname # <1>\n        labelSelector: \n          matchExpressions:\n          - key: app\n            operator: In\n            values:\n            - myboot # <2>\n  containers:\n----\n<1> The node label key. If two nodes are labeled with this key and have identical values, the scheduler treats both nodes as being in the same topology. In this case, `hostname` is a label that is different for each node.\n<2> The affinity is with Pods labeled with `app=myboot`.\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-affinity.yml\n----\n\n[.console-output]\n[source,bash]\n----\nNAME                      READY  STATUS   AGE    NODE\nmyboot2-7c5f46cbc9-hwm2v  0/1    Pending  5h38m  <none>\n----\n--\n====\n\nThe `myboot2` Pod is pending as it couldn't find any Pod matching the affinity rule.\n\nTo address this, let's deploy a `myboot` application labeled with `app=myboot`.\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-deployment.yml\n----\n--\n====\n\nAnd we'll see that both start up, and run on _the same node_\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nNAME                      READY  STATUS             AGE    NODE\nmyboot-7cbfbd9b89-267k6   0/1    ContainerCreating  5s     #devnation-m02#\nmyboot2-7c5f46cbc9-hwm2v  0/1    ContainerCreating  5h45m  #devnation-m02#\n----\n--\n====\n\n[TIP]\n====\nWhat you've just seen is a _hard_ rule; you can use \"soft\" rules as well in Pod Affinity.\n\n[.console-output]\n[source, yaml, subs=\"+quotes\"]\n----\nspec:\n  
affinity:\n    podAntiAffinity:\n      #preferredDuringSchedulingIgnoredDuringExecution:#\n      - weight: 1\n        podAffinityTerm:\n          topologyKey: kubernetes.io/hostname \n          labelSelector:\n            matchExpressions:  \n            - key: app\n              operator: In\n              values:\n              - myboot   \n----\n====\n\n*Anti-affinity* is used to ensure that two Pods do NOT run together on the same node.\n\n:quick-open-file: myboot-pod-antiaffinity.yaml\n\nLet's add another pod.  Open `{quick-open-file}` and focus on the following part\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[.console-output]\n[source, yaml]\n.{quick-open-file}\n----\nspec:\n  affinity:\n    podAntiAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution:\n      - topologyKey: kubernetes.io/hostname\n        labelSelector: \n          matchExpressions:\n          - key: app\n            operator: In\n            values:\n            - myboot\n----\n\nThis basically says that this pod should not be scheduled on any individual node (`topologyKey: kubernetes.io/hostname`) that has a pod with the `app=myboot` label.\n\nDeploy a myboot3 with the above anti-affinity rule\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-antiaffinity.yaml\n----\n--\n====\n\nAnd then notice what happens in the watch window\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nNAME                      READY  STATUS             AGE    NODE\nmyboot-7cbfbd9b89-267k6   1/1    Running            10m    devnation-m02\nmyboot2-7c5f46cbc9-hwm2v  1/1    Running            5h56m  devnation-m02\nmyboot3-6f95c866f6-7kvdw  0/1    ContainerCreating  6s     #devnation# \n----\n--\n====\n\nAs you can see from the highlight, the `myboot3` Pod is deployed on a different node than the `myboot` Pod\n\n==== Clean 
Up\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl delete -f apps/kubefiles/myboot-pod-affinity.yml\nkubectl delete -f apps/kubefiles/myboot-pod-antiaffinity.yaml\nkubectl delete -f apps/kubefiles/myboot-deployment.yml\n----\n--\n====\n"
  },
  {
    "path": "documentation/modules/ROOT/pages/volumes-persistentvolumes.adoc",
    "content": "= Volumes & Persistent Volumes\ninclude::_attributes.adoc[]\n:watch-terminal: Terminal 2\n:file-watch-terminal: Terminal 3\n\nContainers are ephemeral by definition, which means that anything that is stored at run time is lost when the container is stopped.\nThis might cause problems with containers that need to persist their data, like database containers.\n\nA Kubernetes volume is just a directory that is accessible to the Containers in a Pod. \nThe concept is similar to Docker volumes, but in Docker you are mapping the container to a computer host, whereas in the case of Kubernetes volumes, the medium that backs it and the contents of it are determined by the particular volume type used.\n\nSome of the volume types are:\n\n* awsElasticBlockStore\n* azureDisk\n* cephfs\n* nfs\n* local\n* empty dir\n* host path\n\n== Preparation\n\n=== Namespace\n\n:section-k8s: volumes\n:set-namespace: myspace\n\nMake sure the proper namespace `{set-namespace}` is created and context is set to point to it.\n\ninclude::partial$namespace-setup-tip.adoc[]\n\ninclude::partial$set-context.adoc[]\n\n=== Watch\n\nIf it's not open already, you'll want to have a terminal open (call it *{watch-terminal}*) to watch what's going on with the pods in our current namespace\n\n:section-k8s: volumes\ninclude::partial$watching-pods-with-nodes.adoc[]\n\n== Volumes\n\nLet's start with two examples of `Volumes`.\n\n=== EmptyDir\n\nAn `emptyDir` volume is first created when a Pod is assigned to a node and exists as long as that Pod is running on that node.\nAs the name says, it is initially empty.\nAll Containers in the same Pod can read and write in the same `emptyDir` volume.\nWhen a Pod is restarted or removed, the data in the `emptyDir` is lost forever.\n\n:quick-open-file: myboot-pod-volume.yml\n\nLet's deploy a service that exposes two endpoints, one to write content to a file and another one to retrieve the content from that file.  
Open `{quick-open-file}`\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\ninclude::example$myboot-pod-volume.yml[]\n----\n<.> Notice that this is a `Pod` and not a `Deployment`\n<.> This is where this mount point will appear in the pod.  See below \n<.> This must match the name of a volume that we define, in this case it is defined right at the bottom of the file\n\nIn `volumes` section, you are defining the volume, and in `volumeMounts` section, how the volume is mounted inside the container.\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-volume.yml\n----\n\nThen in our watch window we should see something like\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME         READY  STATUS             AGE  NODE\nmyboot-demo  0/1    ContainerCreating  9s   devnation\n----\n--\n====\n\nOnce the pod is running, let's exec into the container:\n\n[.console-input]\n[source,bash]\n----\nkubectl exec -ti myboot-demo -- /bin/bash\n----\n\nAnd once `exec` 'd into the container, run the following commands: \n\n[tabs]\n====\nContainer::\n+\n--\n[.console-input]\n[source,bash]\n----\ncurl localhost:8080/appendgreetingfile\ncurl localhost:8080/readgreetingfile\n----\n\nWhich should return\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nJambo\n----\n\nIn this case, the `emptyDir` was set to `/tmp/demo` so you can check the directory content by running `ls`:\n\n[.console-input]\n[source,bash]\n----\nls /tmp/demo\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ngreeting.txt\n----\n--\n====\n\n==== EmptyDir Ephemerality\n\nIf you haven't already, close the container's shell:\n\n[tabs]\n====\nContainer::\n+\n--\n[.console-input]\n[source,bash]\n----\nexit\n----\n--\n====\n\nAnd delete the pod:\n\n[.console-input]\n[source,bash]\n----\nkubectl delete pod 
myboot-demo\n----\n\n[IMPORTANT]\n====\nYou need to wait until the pod is completely deleted before trying to deploy it again\n====\n\nThen if you deploy the same service again: \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-volume.yml\n----\n\nAnd once in the `Running` state `exec` into the pod: \n\n[.console-input]\n[source,bash]\n----\nkubectl exec -ti myboot-demo -- /bin/bash\n----\n\n[tabs]\n====\nContainer::\n+\n--\n\nLet's list the contents of our mount point in our new pod\n\n[.console-input]\n[source,bash]\n----\nls /tmp/demo\n----\n\nYou'll notice that the directory content is empty, meaning that the file we created with the last pod was destroyed when the pod was deleted\n\n[.console-output]\n[source,bash]\n----\nroot@myboot-demo:/app# \n----\n\nExit the pod\n\n[.console-input]\n[source,bash]\n----\nexit\n----\n\n--\n====\n\nNow delete the pod.\n\n[.console-input]\n[source,bash]\n----\nkubectl delete pod myboot-demo\n----\n\n==== EmptyDir Sharing in Pod\n\n`emptyDir` is shared between containers of the same Pod.  Let's take a look at a deployment that creates two containers in the same pod that mount the same `emptyDir` volume.\n\n:quick-open-file: myboot-pods-volume.yml\ninclude::partial$tip_vscode_quick_open.adoc[]\n\nConsider `{quick-open-file}`: \n\n[.console-output]\n[source,yaml]\n.{quick-open-file}\n----\ninclude::example$myboot-pods-volume.yml[]\n----\n<.> The first container in the pod is called myboot-demo-1 and mounts `demo-volume` at `/tmp/demo`\n<.> The second container in the pod is called `myboot-demo-2` and mounts `demo-volume` at the same `/tmp/demo` point\n<.> Both containers use the same exact image\n<.> Notice that the second container needs to listen on a different port from the first since the containers share ports on the pod.  
The `env` directive at this level only applies to the `myboot-demo-2` container\n<.> The volume is defined only once but referenced by each container in the pod\n\nNow let's create that deployment in the `{set-namespace}` namespace\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/myboot-pods-volume.yml\n----\n\nAnd in our pod watch we should see\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes,+quotes\"]\n----\nNAME          READY   STATUS    RESTARTS   AGE\nmyboot-demo   #2/2#    Running   0          4s\n----\n\nNotice the `2/2` ready status.  This represents the 2 containers in the pod definition\n--\n====\n\nFirst, let's exec into the *second* container in the pod and start a watch on the mount point.  For this we'll open yet another terminal (*{file-watch-terminal}*) `exec` into the other container in the pod to run the `cat` command\n\n[tabs]\n====\n{file-watch-terminal}::\n+\n--\n\ninclude::partial$open-terminal-in-editor-inset.adoc[]\n\n[.console-input]\n[source,bash]\n----\nkubectl exec -it myboot-demo -c myboot-demo-2 -- bash \n----\n\nAnd then from inside the `myboot-demo-2` container in the pod, run the following command: \n\n[.console-input]\n[source,bash]\n----\nwatch -n1 -- \"ls -l /tmp/demo && eval \"\"cat /tmp/demo/greeting.txt\"\"\"\n----\n\nWhich will at first return\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ntotal 0\ncat: /tmp/demo/greeting.txt: No such file or directory\n----\n\n--\n====\n\nLet's access into the *first* container in the main terminal and see if we can get it to create a file that the *second* container can see\n\n[tabs]\n====\nTerminal 1::\n+\n--\n[.console-input]\n[source,bash]\n----\nkubectl exec -ti myboot-demo -c myboot-demo-1 -- /bin/bash\n----\n\nand generate some content to `/tmp/demo` directory.\n\n[.console-input]\n[source,bash]\n----\ncurl localhost:8080/appendgreetingfile\n----\n\nAnd then show that the file 
exists and what its content is: \n\n[.console-input]\n[source,bash]\n----\nls -l /tmp/demo && echo $(cat /tmp/demo/greeting.txt) \n----\n\n[.console-output]\n[source,bash]\n----\ntotal 4\n-rw-r--r--. 1 root root 5 Jul 13 08:11 greeting.txt\nJambo\n----\n\n--\n====\n\nMeanwhile in *{file-watch-terminal}* you should see something like: \n\n[tabs]\n====\n{file-watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ntotal 4\n-rw-r--r--. 1 root root 5 Jul 13 08:11 greeting.txt\nJambo\n----\n\nHit kbd:[CTRL+c] to exit the watch and then exit out of the `exec` to the pod\n\n[.console-input]\n[source,bash]\n----\nexit\n----\n\nNow, back in your terminal you can get the volume information from a Pod by running:\n\n[.console-input]\n[source,bash]\n----\nkubectl describe pod myboot-demo\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nVolumes:\n  demo-volume:\n    Type:       EmptyDir (a temporary directory that shares a pods lifetime)\n    Medium:\n    SizeLimit:  <unset>\n----\n\n--\n====\n\n==== Clean Up\n\ninclude::partial$terminal-cleanup.adoc[tags=**;!*;term3;term-exec]\n\n=== HostPath\n\n:quick-open-file: myboot-pod-volume-hostpath.yml\n\nA `hostPath` volume mounts a file or directory from the node's filesystem into the Pod.  
Take a look at `{quick-open-file}`\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\ninclude::example$myboot-pod-volume-hostpath.yaml[]\n----\n<.> We're mounting the same location as before, but you can see that we define the volume as `hostPath` here instead of `emptyDir`\n<.> `/mnt/data` is a location on the kubernetes `node` to which this pod gets assigned\n\nIn this case, you are defining the host/node directory where the contents are going to be stored.\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-volume-hostpath.yaml\n----\n\nNow, if you describe the Pod, in volumes section, you'll see:\n\n[.console-input]\n[source,bash]\n----\nkubectl describe pod myboot-demo\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nVolumes:\n  demo-volume:\n    Type:          HostPath (bare host directory volume)\n    Path:          /mnt/data\n    HostPathType:\n----\n\n\n[tabs]\n====\n{file-watch-terminal}::\n+\n--\nLet's open a terminal where we can watch the directory on the 'host' or the 'node'\n\ninclude::partial$open-terminal-in-editor-inset.adoc[]\n\n\n:mount-dir: /mnt/data\ninclude::partial$watch-node-directory.adoc[]\n\n--\n====\n\n[tabs]\n====\nTerminal 1::\n+\n--\n\ninclude::partial$create-greeting-file.adoc[]\n\n--\n====\n\nMeanwhile in the other terminal (*{file-watch-terminal}*) you should at the same time see the watch output change\n\n[tabs]\n====\n{file-watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nEvery 1.0s: eval ls -al /mnt/da...  devnation: Tue Jul 13 09:14:28 2021\n\ntotal 4\ndrwxr-xr-x. 1 root root 24 Jul 13 09:13 .\ndrwxr-xr-x. 1 root root  8 Jul 13 08:24 ..\n-rw-r--r--. 
1 root root  5 Jul 13 09:13 greeting.txt\nJambo\n----\n--\n====\n\nNotice that now the content stored in `/tmp/demo` inside the Pod is stored at host path `/mnt/data`, so if the Pod dies, the content is not lost.\n\nBut this might not solve all the problems as if the Pod goes down and it is rescheduled in another node, then the data will not be in this other node.\n\nLet's see another example, in this case for an Amazon EBS Volume:\n\n[source, yaml]\n----\napiVersion: v1\nkind: Pod\nmetadata:\n  name: test-ebs\nspec:\n...  \n  volumes:\n    - name: test-volume\n      awsElasticBlockStore:\n        volumeID: <volume-id>\n        fsType: ext4\n----\n\nWhat we want you to notice from the previous snippet is that you are mixing things from your application (ie the container, probes, ports, ...) things that are more in the _dev_ side with things more related to the cloud (ie physical storage), which falls more in the _ops_ side.\n\nTo avoid this mix of concepts, Kubernetes offers some layer of abstractions, so developers just ask for space to store data (_persistent volume claim_), and the operations team offers the physical storage configuration.\n\n==== Clean Up\n\ninclude::partial$terminal-cleanup.adoc[tags=**;!*;term-exec]\n\n== Persistent Volume & Persistent Volume Claim\n\nA `PersistentVolume` (_PV_) is a Kubernetes resource that is created by an administrator or dynamically using `Storage Classes` independently from the Pod.\nIt captures the details of the implementation of the storage and can be NFS, Ceph, iSCSI, or a cloud-provider-specific storage system.\n\nA `PersistentVolumeClaim` (_PVC_) is a request for storage by a user. \nIt can request for a specific volume size or, for example, the access mode.\n\n=== Persistent volume/claim with hostPath\n\n:quick-open-file: demo-persistent-volume-hostpath.yaml\n\nLet's use `hostPath` strategy, but not configuring it directly as volume, but using persistent volume and persistent volume claim. 
Check out `{quick-open-file}`:\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\nkind: PersistentVolume\napiVersion: v1\nmetadata:\n  name: my-persistent-volume\n  labels:\n    type: local\nspec:\n  storageClassName: pv-demo \n  capacity:\n    storage: 100Mi\n  accessModes:\n    - ReadWriteOnce\n  hostPath:\n    path: \"/mnt/persistent-volume\"\n----\n\nNow, the `volume` information is not in the pod anymore but in the _persistent volume_ object.\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/demo-persistent-volume-hostpath.yaml \n\nkubectl get pv -w\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                                           STORAGECLASS   REASON   AGE\nmy-persistent-volume                       100Mi      RWO            Retain           Available                                                   pv-demo                 5s\n----\n\n:mount-dir: /mnt/persistent-volume\n\nOnce the volume is established, let's update our file watch terminal to look in the volume's new location: `{mount-dir}`\n\n[tabs]\n====\n{file-watch-terminal}::\n+\n--\n\nHit kbd:[CTRL+c] to exit out of the current watch\n\nThen start a new watch\n\ninclude::partial$file-watch-command.adoc[]\n--\n====\n\n:quick-open-file: myboot-persistent-volume-claim.yaml\n\nThen from the dev side, we need to claim what we need from the _PV_.\nIn the following example, we are requesting for *10Mi* space.  
See `{quick-open-file}`:\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: myboot-volumeclaim\nspec:\n  storageClassName: pv-demo \n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Mi\n----\n\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/myboot-persistent-volume-claim.yaml\n\nkubectl get pvc -w\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 STATUS   VOLUME                 CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nmyboot-volumeclaim   Bound    my-persistent-volume   100Mi      RWO            pv-demo        3s\n----\n\n:quick-open-file: myboot-pod-volume-pvc.yaml\n\nThe big difference is that now in the pod you are just defining in the `volumes` section, not the volume configuration directly, but the _persistent volume claim_ to use.  See `{quick-open-file}`:\n\ninclude::partial$tip_vscode_quick_open.adoc[]\n\n[source, yaml]\n.{quick-open-file}\n----\napiVersion: v1\nkind: Pod\nmetadata:\n  name: myboot-demo\nspec:\n  containers:\n  - name: myboot-demo\n    image: quay.io/rhdevelopers/myboot:v4\n    \n    volumeMounts:\n    - mountPath: /tmp/demo\n      name: demo-volume\n\n  volumes:\n  - name: demo-volume\n    persistentVolumeClaim:\n      claimName: myboot-volumeclaim\n----\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-volume-pvc.yaml\n\nkubectl describe pod myboot-demo\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nVolumes:\n  demo-volume:\n    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n    ClaimName:  myboot-volumeclaim\n    ReadOnly:   false\n----\n\nNotice that now the description of the pod shows that the volume is not set directly but through a persistent volume 
claim.\n\ninclude::partial$create-greeting-file.adoc[]\n\nAnd as soon as we've done that we'll expect to see the following on the path on the node that the `PersistentVolume` maps to: \n\n[tabs]\n====\n{file-watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nEvery 1.0s: ls -al {mount-dir} && eval c...  devnation: Mon Jul 19 14:07:53 2021\n\ntotal 4\ndrwxr-xr-x. 1 root root 24 Jul 19 14:06 .\ndrwxr-xr-x. 1 root root 42 Jul 13 09:21 ..\n-rw-r--r--. 1 root root  5 Jul 19 14:06 greeting.txt\nJambo\n----\n--\n====\n\n==== Clean Up\n\ninclude::partial$terminal-cleanup.adoc[tags=**;!*;term-exec;term3-ssh]\n\nOnce all is cleaned, run the following: \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get pvc\n----\n\nResults in:\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 STATUS   VOLUME                 CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nmyboot-volumeclaim   Bound    my-persistent-volume   100Mi      RWO            pv-demo        14m\n----\n\nEven though the pod has been deleted, the PVC (and the PV) are still there and need to be deleted manually.\n\n[.console-input]\n[source,bash]\n----\nkubectl delete -f apps/kubefiles/myboot-persistent-volume-claim.yaml\nkubectl delete -f apps/kubefiles/demo-persistent-volume-hostpath.yaml\n----\n\n== Static vs Dynamic Provisioning\n\nPersistent Volumes can be provisioned dynamically or statically.\n\nStatic provisioning allows cluster administrators to make *existing* storage device available to a cluster.\nWhen it is done in this way, the PV and the PVC must be provided manually.\n\nSo far, in the last example, you've seen static provisioning.\n\nThe dynamic provisioning eliminates the need for cluster administrators to pre-provision storage. 
\nInstead, it automatically provisions storage when it is requested by users.\nTo make it run you need to provide a Storage Class object and a PVC referring to it.\nAfter the PVC is created, the storage device and the PV are automatically created for you.\nThe main purpose of dynamic provisioning is to work with cloud provider solutions.\n\nNormally, the Kubernetes implementation offers a default Storage Class so anyone can get started quickly with dynamic provisioning.\nYou can get information from the default Storage Class by running:\n\n[.console-input]\n[source,bash]\n----\nkubectl get sc\n----\n\n[tabs]\n====\nMinikube::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 PROVISIONER                RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE\nstandard (default)   k8s.io/minikube-hostpath   Delete          Immediate           false                  47d\n----\n--\nOpenShift::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME            PROVISIONER             AGE\ngp2 (default)   kubernetes.io/aws-ebs   31h\n----\n\nBy default, when OpenShift is installed in a cloud provider, it automatically creates a Storage Class with the underlying persistent technology of the cloud.\nFor example in the case of AWS, a default Storage Class is provided pointing out to AWS EBS.\n--\n====\n\nThen you can create a Persistent Volume Claim which will create a Persistent Volume automatically.  Use kbd:[CTRL+p] to open `demo-dynamic-persistent.yaml` quickly:\n\n[source, yaml]\n----\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n  name: myboot-volumeclaim\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Mi\n----\n\nSince we've not specified any _storage class_ but there is one defined as the default, the _PVC_ implicitly refers to that one.  
(You might consider comparing this pod definition to `myboot-persistent-volume-claim.yaml`)\n\n.Difference between static and dynamic PVC (with static PV)\nimage::pv-static-vs-dynamic.png[]\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/demo-dynamic-persistent.yaml\n\nkubectl get pvc\n----\n\n[tabs]\n====\nMinikube::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nmyboot-volumeclaim   Pending                                      standard       2s\n----\n--\nOpenShift::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nmyboot-volumeclaim   Pending                                      gp2            46s\n----\n--\n====\n\nNotice that the _PVC_ is in _Pending_ status because we are using dynamic storage: until a _pod_ requests the volume, the _PVC_ will remain in a pending state and the _PV_ will not be created.\n\n[.console-input]\n[source,bash]\n----\nkubectl apply -f apps/kubefiles/myboot-pod-volume-pvc.yaml\n----\n\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME          READY   STATUS    RESTARTS   AGE\nmyboot-demo   1/1     Running   0          2m36s\n----\n--\n====\n\nWhen the pod is in _Running_ status, then you can get _PVC_ and _PV_ parameters.\n\n[.console-input]\n[source,bash]\n----\nkubectl get pvc\n----\n\n[tabs]\n====\nMinikube::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nmyboot-volumeclaim   Bound    pvc-170f2e9a-4afc-4869-bd19-f10c86bff34b   10Mi       RWO            standard       
5s\n----\n--\nOpenShift::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                 STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nmyboot-volumeclaim   Bound    pvc-6de4f27e-bd40-4b58-bb46-91eb08ca5bd7   1Gi        RWO            gp2            116s\n----\n--\n====\n\n\nNotice that now the volume claim is  _Bound_ to a volume.\n\nFinally, you can check that the _PV_ has been created automatically:\n\n[.console-input]\n[source,bash]\n----\nkubectl get pv\n----\n\n[tabs]\n====\nMinikube::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                        STORAGECLASS   REASON   AGE\npvc-170f2e9a-4afc-4869-bd19-f10c86bff34b   10Mi       RWO            Delete           Bound    myspace/myboot-volumeclaim   standard                56s\n----\n--\nOpenShift::\n+\n--\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                        STORAGECLASS   REASON   AGE\npvc-6de4f27e-bd40-4b58-bb46-91eb08ca5bd7   1Gi        RWO            Delete           Bound    default/myboot-volumeclaim   gp2                     77s\n----\n--\n====\n\nNotice that the _CLAIM_ field points to the _PVC_ responsible for the creation of the _PV_.\n\n=== Clean Up\n\n[.console-input]\n[source,bash]\n----\nkubectl delete -f apps/kubefiles/myboot-pod-volume-pvc.yaml\nkubectl delete -f apps/kubefiles/demo-dynamic-persistent.yaml\n----\n\n== Distributed Filesystems\n\nIt is important to notice that cloud-providers offer distributed storages so data is always available in all the nodes.\nAs you've seen in the last example, this storage class guarantees that all nodes see the same disk content.\n\nFor example, if you are using Kubernetes/OpenShift on-prem or if 
you don't want to rely on a vendor solution, there is also support for distributed filesystems in Kubernetes.\nIf that's the case, we recommend you use NFS, https://www.gluster.org/[GlusterFS] or https://ceph.io/[Ceph].\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/create-greeting-file.adoc",
    "content": "[.console-input]\n[source,bash]\n----\nkubectl exec -ti myboot-demo -- /bin/bash\n----\n\nand then from within the pod, generate some content to `/tmp/demo` directory.\n\n[.console-input]\n[source,bash]\n----\ncurl localhost:8080/appendgreetingfile\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/describe-deployment.adoc",
    "content": "[#{section-k8s}-kubectl-describe-deployment]\n[.console-input]\n[source, bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe deployment {describe-deployment-name}\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/describe.adoc",
    "content": "[#{section-k8s}-kubectl-describe-services]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nPODNAME=$(kubectl get pod -l {label-describe} --field-selector pass:['status.phase!=Terminating'] -o name)\nkubectl describe $PODNAME\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/env-curl.adoc",
    "content": "[tabs]\n====\nMinikube::\n+\n--\n:tmp-service-exposed: {service-exposed}\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nIP=$(minikube ip -p devnation)\nPORT=$(kubectl get service/{tmp-service-exposed} -o jsonpath=\"{.spec.ports[*].nodePort}\")\n----\n--\nHosted::\n+\n--\nIf using a hosted Kubernetes cluster like OpenShift then use curl and the EXTERNAL-IP address with port `8080` or get it using `kubectl`:\n\n:tmp-service-exposed: {service-exposed}\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nIP=$(kubectl get service {tmp-service-exposed} -o jsonpath=\"{.status.loadBalancer.ingress[0].ip}\")\nPORT=$(kubectl get service {tmp-service-exposed} -o jsonpath=\"{.spec.ports[*].port}\")\n----\n\nIMPORTANT: If you are in AWS, you need to get the `hostname` instead of `ip.`\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nIP=$(kubectl get service {tmp-service-exposed} -o jsonpath=\"{.status.loadBalancer.ingress[0].hostname}\")\n----\n--\n====\n\nCurl the Service:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\ncurl $IP:$PORT\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/file-watch-command.adoc",
    "content": "[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nwatch -n1 -- \"ls -al {mount-dir} && eval \"\"cat {mount-dir}/greeting.txt\"\"\"\n----"
  },
  {
    "path": "documentation/modules/ROOT/partials/loop.adoc",
    "content": "[#{section-k8s}-curl-loop]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nwhile true\ndo curl $IP:$PORT\nsleep {curl-loop-sleep-time}\ndone\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/namespace-setup-tip.adoc",
    "content": "[TIP]\n====\nYou will need to create the `{set-namespace}` if you haven't already.  Check for the existence of the namespace with\n\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\nkubectl get ns {set-namespace}\n----\n\nIf the response is: \n\n[.console-output]\n[source,bash, subs=\"+attributes\"]\n----\nError from server (NotFound): namespaces \"{set-namespace}\" not found\n----\n\nThen you can create the namespace with: \n\n[.console-input]\n[source, bash, subs=\"+attributes\"]\n----\nkubectl create ns {set-namespace}\n----\n===="
  },
  {
    "path": "documentation/modules/ROOT/partials/open-terminal-in-editor-inset.adoc",
    "content": ".VSCode: Open Terminal in Editor\n****\nIf you're doing this tutorial from within VSCode you may be running out of space to put your terminals at this point!  If you have a recent release of VSCode, you might consider opening a new terminal in the editor pane by using kbd:[CTRL+SHIFT+p] (or kbd:[CMD+SHIFT+p] on Mac OSX) to run the `Terminal: Create Terminal in Editor Area` command\n****"
  },
  {
    "path": "documentation/modules/ROOT/partials/optional-requisites.adoc",
    "content": "The following CLI tools are optional for running the exercises in this tutorial.\nAlthough they are used in the tutorial, you could use others without any problem.\n\n[cols=\"4*^,4*.\",options=\"header,+attributes\"]\n|===\n|**Tool**|**macOS**|**Fedora**|**windows**\n\n| https://github.com/mikefarah/yq[yq v2.4.1]\n| https://github.com/mikefarah/yq/releases/download/2.4.1/yq_darwin_amd64[Download]\n| https://github.com/mikefarah/yq/releases/download/2.4.1/yq_linux_amd64[Download]\n| https://github.com/mikefarah/yq/releases/download/2.4.1/yq_windows_amd64.exe[Download]\n\n| https://github.com/stedolan/jq[jq v1.6.0]\n| https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64[Download]\n| https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64[Download]\n| https://github.com/stedolan/jq/releases/download/jq-1.6/jq-win64.exe[Download]\n\n| https://httpie.org/[httpie]\n| `brew install httpie`\n| `dnf install httpie`\n| https://httpie.org/doc#windows-etc\n\n| watch\n| `brew install watch`\n| `dnf install procps-ng`\n|\n\n| kubectx and kubens\n| `brew install kubectx`\n| https://github.com/ahmetb/kubectx\n|\n\n| https://github.com/rakyll/hey[hey]\n| `brew install hey`\n| https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2[Download]\n| https://hey-release.s3.us-east-2.amazonaws.com/hey_windows_amd64[Download]\n\n| https://github.com/wercker/stern[stern]\n| `brew install stern`\n| https://github.com/stern/stern/releases/download/v{stern-version}/stern_{stern-version}_linux_amd64.tar.gz[Download]\n| https://github.com/stern/stern/releases/download/v{stern-version}/stern_{stern-version}_windows_amd64.tar.gz[Download]\n|===\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/prerequisites-kubernetes.adoc",
    "content": ":kubernetes-version: v1.34.0\n:minikube-version: v1.37.0\n:maven-version: 3.9.9\n:java-version: 21\n:stern-version: 1.33.0\n\nThe following CLI tools are required for running the exercises in this tutorial. \nPlease have them installed and configured before you get started with any of the tutorial chapters.\n\n[cols=\"4*^,4*.\",options=\"header,+attributes\"]\n|===\n|**Tool**|**macOS**|**Fedora**|**windows**\n\n| `Git`\n| https://git-scm.com/download/mac[Download]\n| https://git-scm.com/download/linux[Download]\n| https://git-scm.com/download/win[Download]\n\n| `Docker`\n| https://docs.docker.com/docker-for-mac/install[Docker for Mac]\n| `dnf install docker`\n| https://docs.docker.com/docker-for-windows/install[Docker for Windows]\n\n| `VirtualBox`\n| https://download.virtualbox.org/virtualbox/7.2.2/VirtualBox-7.2.2-170484-OSX.dmg[Download]\n| No need for VirtualBox on Linux since you can rely on embedded kernel virtualization\n| https://download.virtualbox.org/virtualbox/7.2.2/VirtualBox-7.2.2-170484-Win.exe[Download]\n\n| `https://kubernetes.io/docs/tasks/tools/install-minikube[Minikube] {minikube-version}`\n| https://github.com/kubernetes/minikube/releases/download/{minikube-version}/minikube-darwin-amd64[Download]\n| https://github.com/kubernetes/minikube/releases/download/{minikube-version}/minikube-linux-amd64[Download]\n| https://github.com/kubernetes/minikube/releases/download/{minikube-version}/minikube-windows-amd64.exe[Download]\n\n| `kubectl {kubernetes-version}`\n| https://storage.googleapis.com/kubernetes-release/release/{kubernetes-version}/bin/darwin/amd64/kubectl[Download]\n| https://storage.googleapis.com/kubernetes-release/release/{kubernetes-version}/bin/linux/amd64/kubectl[Download]\n| https://storage.googleapis.com/kubernetes-release/release/{kubernetes-version}/bin/windows/amd64/kubectl.exe[Download]\n\n\n| `Apache Maven {maven-version}`\n| 
https://archive.apache.org/dist/maven/maven-3/{maven-version}/binaries/apache-maven-{maven-version}-bin.tar.gz[Download]\n| https://archive.apache.org/dist/maven/maven-3/{maven-version}/binaries/apache-maven-{maven-version}-bin.tar.gz[Download]\n| https://archive.apache.org/dist/maven/maven-3/{maven-version}/binaries/apache-maven-{maven-version}-bin.tar.gz[Download]\n\n| `Java {java-version}`\n| https://adoptium.net/installation/\n| https://adoptium.net/installation/ alternatively: \n\n`dnf install java-{java-version}-openjdk-devel`\n| https://adoptium.net/installation/ (Make sure you set the `JAVA_HOME` environment variable and add `%JAVA_HOME%\\bin` to your `PATH`)\n|===\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/set-context.adoc",
    "content": "[#{section-k8s}-change-context-resource]\n[.console-input]\n[source, bash, subs=\"+macros,+attributes\"]\n----\nkubectl config set-context --current --namespace={set-namespace}\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/stern-watch.adoc",
    "content": "[#{section-k8s}-kubectl-watch-logs]\n\n// FIXME: the attributes inside the code block in the tab don't get filled in \n// if they are not first used outside the tab block\nWe are going to have stern watch the {stern-namespace} namespace for {stern-pattern}\n\n[tabs]\n====\n{log-terminal} ::\n+\n--\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nstern -n {stern-namespace} {stern-pattern}\n----\n\n--\n====\n\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/taint-remove-taint.adoc",
    "content": "// tag::openshift[]\n:chosen-node: ip-10-0-140-186.eu-central-1.compute.internal\n// end::openshift[]\n// tag::minikube[]\n:chosen-node: devnation-m02 \n// end::minikube[]\n\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl describe pod #<.>\n----\n<.> There is only one pod in this case.  If we wanted to be specific, we could add the name of the pod (e.g. `myboot-7f889dd6d-n5z55`)\n\n// tag::openshift[]\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nName:           myboot-7f889dd6d-n5z55\nNamespace:      kubetut\nPriority:       0\nNode:           <none>\nLabels:         app=myboot\n                pod-template-hash=7f889dd6d\nAnnotations:    openshift.io/scc: restricted\nStatus:         Pending\n\nNode-Selectors:  <none>\nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type     Reason            Age        From               Message\n  ----     ------            ----       ----               -------\n  Warning  FailedScheduling  <unknown>  default-scheduler  #0/9 nodes are available: 9 node(s) had taints that the pod didn't tolerate.#\n  Warning  FailedScheduling  <unknown>  default-scheduler  #0/9 nodes are available: 9 node(s) had taints that the pod didn't tolerate.#\n----\n// end::openshift[]\n\n// tag::minikube[]\n[.console-output]\n[source,bash,subs=\"+quotes\"]\n----\nName:           myboot-7cbfbd9b89-bzhxw\nNamespace:      myspace\nPriority:       0\nNode:           <none>\nLabels:         app=myboot\n                pod-template-hash=7cbfbd9b89\nAnnotations:    <none>\nStatus:         Pending\n...\nNode-Selectors:              <none>\nTolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n  Type     Reason            Age                From               Message\n  ----     
------            ----               ----               -------\n  Warning  FailedScheduling  13s (x2 over 14s)  default-scheduler  #0/2 nodes are available: 2 node(s) had taint {color: blue}, that the pod didn't tolerate.#\n----\n// end::minikube[]\n\nLet's get the list of nodes in our cluster \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get nodes\n----\n\n[.console-output]\n[source,bash]\n----\n# tag::openshift[]\nNAME                                            STATUS   ROLES    AGE   VERSION\nip-10-0-136-107.eu-central-1.compute.internal   Ready    master   20h   v1.16.2\nip-10-0-140-186.eu-central-1.compute.internal   Ready    worker   20h   v1.16.2\nip-10-0-141-128.eu-central-1.compute.internal   Ready    worker   18h   v1.16.2\nip-10-0-146-109.eu-central-1.compute.internal   Ready    worker   18h   v1.16.2\nip-10-0-150-226.eu-central-1.compute.internal   Ready    worker   20h   v1.16.2\nip-10-0-155-122.eu-central-1.compute.internal   Ready    master   20h   v1.16.2\nip-10-0-162-206.eu-central-1.compute.internal   Ready    worker   20h   v1.16.2\nip-10-0-168-102.eu-central-1.compute.internal   Ready    master   20h   v1.16.2\nip-10-0-175-64.eu-central-1.compute.internal    Ready    worker   18h   v1.16.2\n# end::openshift[]\n# tag::minikube[]\nNAME            STATUS   ROLES                  AGE     VERSION\ndevnation       Ready    control-plane,master   2d22h   v1.21.2\ndevnation-m02   Ready    <none>                 40h     v1.21.2\n# end::minikube[]\n----\n\nAnd pick one node that we will *remove* the taint from:\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl taint node {chosen-node} color:NoSchedule- #<.>\n----\n<.> adding the `-` here means to remove the taint in question (the `color` with the action `NoSchedule`)\n\n[.console-output]\n[source,bash,subs=\"+attributes\"]\n----\nnode/{chosen-node}  untainted\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/terminal-cleanup.adoc",
    "content": "[tabs]\n====\nTerminal 1::\n+\n--\n// tag::term-exec[]\nExit the `exec` command\n\n[.console-input]\n[source,bash]\n----\nexit\n----\n// end::term-exec[]\n\nNow delete the pod\n\n[.console-input]\n[source,bash]\n----\nkubectl delete pod myboot-demo\n----\n\n--\n// tag::term2[]\nTerminal 2::\n+\n--\n\n[.console-input]\n[source,bash]\n----\nexit\n----\n\nThis should close out the terminal\n--\n// end::term2[]\n// tag::term3[]\nTerminal 3::\n+\n--\n\nClose out the terminal window by typing the following in it\n\n[.console-input]\n[source,bash]\n----\nexit\n----\n\n--\n// end::term3[]\n// tag::term3-ssh[]\nTerminal 3::\n+\n--\nHit kbd:[CTRL+c] to exit out of the `watch`\n\nAnd then in the `ssh` shell type\n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nexit\n----\n--\n// end::term3-ssh[]\n===="
  },
  {
    "path": "documentation/modules/ROOT/partials/tip_vscode_kube_editor.adoc",
    "content": "[TIP]\n====\nIf you're running this tutorial from within VSCode or would like to use VSCode to edit the resource targeted, make sure you set the following environment variable before issuing the `kubectl edit`: \n\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nexport KUBE_EDITOR=\"code -w\"\n----\n===="
  },
  {
    "path": "documentation/modules/ROOT/partials/tip_vscode_quick_open.adoc",
    "content": "[TIP]\n====\nIf you're running this from within VSCode you can use kbd:[CTRL+p] (or kbd:[CMD+p] on Mac OSX) to quickly open `{quick-open-file}`\n===="
  },
  {
    "path": "documentation/modules/ROOT/partials/watch-node-directory.adoc",
    "content": "\nLet's use the `minikube ssh` command to simulate a connection to the kubernetes node.  (There is only one node running in minikube)\n\n[.console-input]\n[source,bash]\n----\nminikube ssh\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nLast login: Tue Jul 13 08:26:18 2021 from 192.168.49.1\ndocker@devnation:~$\n----\n\nNow that we're there, let's watch the `{mount-dir}` directory that the pod has mounted as `/tmp/demo`\n\ninclude::partial$file-watch-command.adoc[]"
  },
  {
    "path": "documentation/modules/ROOT/partials/watching-pods-with-nodes.adoc",
    "content": "[#{section-k8s}-kubectl-watch-pods]\n[tabs]\n====\n{watch-terminal}::\n+\n--\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nwatch -n 1 \"kubectl get pods -o wide \\#<.>\n  | awk '{print \\$1 \\\" \\\" \\$2 \\\" \\\" \\$3 \\\" \\\" \\$5 \\\" \\\" \\$7}' | column -t\" #<.>\n----\n<.> the `-o wide` option allows us to see the node that the pod is scheduled to\n<.> to keep the line from getting too long we'll use `awk` and `column` to get and format only the columns we want\n\n--\n===="
  },
  {
    "path": "documentation/modules/ROOT/partials/watching-pods.adoc",
    "content": "[#{section-k8s}-kubectl-watch-pods]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nwatch -n 1 -- kubectl get pods\n----\n"
  },
  {
    "path": "documentation/modules/ROOT/partials/watching-services.adoc",
    "content": "[#{section-k8s}-kubectl-watch-services]\n[.console-input]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nkubectl get services -w\n----\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME    TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE\nmyapp   LoadBalancer   172.30.103.41   <pending>     8080:31974/TCP   4s\n----\n\nWait until you see an external IP assigned.\n\nNOTE: On Minikube without an Ingress controller, <pending> will not become a real external IP.  https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/[Optional: Setup Minikube Ingress]\n\n[.console-output]\n[source,bash,subs=\"+macros,+attributes\"]\n----\nNAME    TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)          AGE\nmyapp   LoadBalancer   172.30.103.41   34.71.122.153   8080:31974/TCP   44s\n----\n"
  },
  {
    "path": "documentation/modules/_attributes.adoc",
    "content": ""
  },
  {
    "path": "github-pages-stage.yml",
    "content": "runtime:\n  cache_dir: ./.cache/antora\n\nsite:\n  title: Kubernetes Tutorial (Proposed Changes)\n  url: https://hatmarch.github.io/kubernetes-tutorial/\n  start_page: kubernetes-tutorial::index.adoc\n\ncontent:\n  sources:\n    - url: .\n      branches: [v1.29,v1.34]\n      start_path: documentation\n      \nasciidoc:\n  attributes:\n    release-version: v1.34\n    # This attribute enables creating footer based navigation to move to the next or prev page as defined in nav.adoc\n    page-pagination: true\n    # Set this to true to hide the versions component that appears at the bottom of the side nav\n    page-hide-versions-component: true\n    # Set myrepo to the organization in the image registry you want to reference in the tutorial\n    myrepo: your-repo\n    # Set the docker-host attribute to the hostname that should be used to refer to target of curl commands in a container\n    # for example: docker.for.mac.localhost\n    docker-host: localhost\n    experimental: true\n    curl-loop-sleep-time: .8\n  extensions:\n    - ./lib/tab-block.js\n    - ./lib/remote-include-processor.js\n    \nui:\n  bundle:\n    url: https://github.com/redhat-scholars/course-ui/releases/download/v0.1.14/ui-bundle.zip\n    snapshot: true\n  supplemental_files: ./supplemental-ui\n\noutput:\n  dir: ./gh-pages\n"
  },
  {
    "path": "github-pages.yml",
    "content": "runtime:\n  cache_dir: ./.cache/antora\n\nsite:\n  title: Kubernetes Tutorial\n  url: https://redhat-scholars.github.io/kubernetes-tutorial/\n  start_page: kubernetes-tutorial::index.adoc\n\ncontent:\n  sources:\n    - url: .\n      branches: [v1.29,v1.34]\n      start_path: documentation\n      \nasciidoc:\n  attributes:\n    # This attribute allows you to control which version of your documentation is generated by Antora\n    release-version: v1.29\n    # This attribute enables creating footer based navigation to move to the next or prev page as defined in nav.adoc\n    page-pagination: true\n    # Set this to true to hide the versions component that appears at the bottom of the side nav\n    page-hide-versions-component: false\n    # Set myrepo to the organization in the image registry you want to reference in the tutorial\n    myrepo: your-repo\n    # Set the docker-host attribute to the hostname that should be used to refer to target of curl commands in a container\n    # for example: docker.for.mac.localhost\n    docker-host: localhost\n    experimental: true\n    curl-loop-sleep-time: .8\n  extensions:\n    - ./lib/tab-block.js\n    - ./lib/remote-include-processor.js\n    \nui:\n  bundle:\n    url: https://github.com/redhat-scholars/course-ui/releases/download/v0.1.14/ui-bundle.zip\n    snapshot: true\n  supplemental_files: ./supplemental-ui\n\noutput:\n  dir: ./gh-pages\n"
  },
  {
    "path": "gulpfile.babel.js",
    "content": "/*jshint esversion: 6 */\n\nimport { series, watch } from \"gulp\";\nimport { remove } from \"fs-extra\";\nimport { readFileSync } from \"fs\";\nimport {load as yamlLoad} from \"yaml-js\";\nimport generator from \"@antora/site-generator-default\";\nimport browserSync from \"browser-sync\";\n\nconst filename = \"github-pages.yml\";\nconst server = browserSync.create();\nconst args = [\"--playbook\", filename];\n\n//Watch Paths\nfunction watchGlobs() {\n  let json_content = readFileSync(`${__dirname}/${filename}`, \"UTF-8\");\n  let yaml_content = yamlLoad(json_content);\n  let dirs = yaml_content.content.sources.map(source => [\n    `**/*.yml`,\n    `**/*.adoc`,\n    `**/*.hbs`\n  ]); \n  dirs.push([filename]);\n  dirs = [].concat(...dirs);\n  //console.log(dirs);\n  return dirs;\n}\n\nconst siteWatch = () => watch(watchGlobs(), series(build, reload));\n\nconst removeSite = done => remove(\"gh-pages\", done);\nconst removeCache = done => remove(\".cache\", done);\n\nfunction build(done) {\n  generator(args, process.env)\n    .then(() => {\n      done();\n    })\n    .catch(err => {\n      console.log(err);\n      done();\n    });\n}\n\nfunction workshopSite(done){\n  generator([\"--pull\", \"--stacktrace\",\"--playbook\",\"workshop-site.yaml\"], process.env)\n    .then(() => {\n      done();\n    })\n    .catch(err => {\n      console.log(err);\n      done();\n    });\n}\n\nfunction reload(done) {\n  server.reload();\n  done();\n}\n\nfunction serve(done) {\n  server.init({\n    server: {\n      baseDir: \"./gh-pages\"\n    }\n  });\n  done();\n}\n\nconst _build = build;\nexport { _build as build };\nconst _clean = series(removeSite, removeCache);\nexport { _clean as clean };\nconst _default = series(_clean, build, serve, siteWatch);\nexport { _default as default };\n//build workshop docs\nconst _wsite = series(_clean, workshopSite);\nexport { _wsite as workshopSite };\n"
  },
  {
    "path": "lib/copy-to-clipboard.js",
    "content": "const BlockCopyToClipboardMacro = (() => {\n  const $context = Symbol(\"context\");\n  const superclass = Opal.module(null, \"Asciidoctor\").Extensions\n    .BlockMacroProcessor;\n  const scope = Opal.klass(\n    Opal.module(null, \"Antora\"),\n    superclass,\n    \"BlockCopyToClipboardMacro\",\n    function() {}\n  );\n\n  Opal.defn(scope, \"$initialize\", function initialize(name, config, context) {\n    Opal.send(\n      this,\n      Opal.find_super_dispatcher(this, \"initialize\", initialize),\n      [name, config]\n    );\n    this[$context] = context;\n  });\n\n  Opal.defn(scope, \"$process\", function(parent, target, attrs) {\n    const t = target.startsWith(\":\") ? target.substr(1) : target;\n    //console.log(\"target:\", t);\n    const createHtmlFragment = html => this.createBlock(parent, \"pass\", html);\n    const html = `<button class=\"copybtn float-right\" title=\"Copy to clipboard\" data-clipboard-target=\"#${t}\"><i class=\"fa fa-clipboard\"></i></button><br/>`;\n    parent.blocks.push(createHtmlFragment(html));\n  });\n\n  return scope;\n})();\n\nmodule.exports.register = (registry, context) => {\n  registry.blockMacro(\n    BlockCopyToClipboardMacro.$new(\"copyToClipboard\", Opal.hash(), context)\n  );\n};\n"
  },
  {
    "path": "lib/remote-include-processor.js",
    "content": "module.exports = function () {\n    this.includeProcessor(function () {\n      this.$option('position', '>>')\n      this.handles((target) => target.startsWith('http'))\n      this.process((doc, reader, target, attrs) => {\n        const contents = require('child_process').execFileSync('curl', ['--silent', '-L', target], { encoding: 'utf8' })\n        reader.pushInclude(contents, target, target, 1, attrs)\n      })\n    })\n  }\n"
  },
  {
    "path": "lib/tab-block.js",
    "content": "/**\n * Extends the AsciiDoc syntax to support a tabset element. The tabset is\n * created from a dlist that is enclosed in an example block marked with the\n * tabs style.\n *\n * Usage:\n *\n *  [tabs]\n *  ====\n *  Tab A::\n *  +\n *  --\n *  Contents of tab A.\n *  --\n *  Tab B::\n *  +\n *  --\n *  Contents of tab B.\n *  --\n *  ====\n *\n * To use this extension, register the extension.js file with Antora (i.e.,\n * list it as an AsciiDoc extension in the Antora playbook file), combine\n * styles.css with the styles for the site, and combine behavior.js with the\n * JavaScript loaded by the page.\n *\n * @author Dan Allen <dan@opendevise.com>\n */\nconst IdSeparatorChar = '-'\nconst InvalidIdCharsRx = /[^a-zA-Z0-9_]/g\nconst List = Opal.const_get_local(Opal.module(null, 'Asciidoctor'), 'List')\nconst ListItem = Opal.const_get_local(Opal.module(null, 'Asciidoctor'), 'ListItem')\n\nconst generateId = (str, idx) => `tabset${idx}_${str.toLowerCase().replace(InvalidIdCharsRx, IdSeparatorChar)}`\n\nfunction tabsBlock () {\n  this.onContext('example')\n  this.process((parent, reader, attrs) => {\n    const createHtmlFragment = (html) => this.createBlock(parent, 'pass', html)\n    const tabsetIdx = parent.getDocument().counter('idx-tabset')\n    const nodes = []\n    nodes.push(createHtmlFragment('<div class=\"tabset is-loading\">'))\n    const container = this.parseContent(this.createBlock(parent, 'open'), reader)\n    const sourceTabs = container.getBlocks()[0]\n    if (!(sourceTabs && sourceTabs.getContext() === 'dlist' && sourceTabs.getItems().length)) return\n    const tabs = List.$new(parent, 'ulist')\n    tabs.addRole('tabs')\n    const panes = {}\n    sourceTabs.getItems().forEach(([[title], details]) => {\n      const tab = ListItem.$new(tabs)\n      tabs.$append(tab)\n      const id = generateId(title.getText(), tabsetIdx)\n      tab.text = `[[${id}]]${title.text}`\n      let blocks = details.getBlocks()\n      const numBlocks = 
blocks.length\n      if (numBlocks) {\n        if (blocks[0].context === 'open' && numBlocks === 1) blocks = blocks[0].getBlocks()\n        panes[id] = blocks.map((block) => (block.parent = parent) && block)\n      }\n    })\n    nodes.push(tabs)\n    nodes.push(createHtmlFragment('<div class=\"content\">'))\n    Object.entries(panes).forEach(([id, blocks]) => {\n      nodes.push(createHtmlFragment(`<div class=\"tab-pane\" aria-labelledby=\"${id}\">`))\n      nodes.push(...blocks)\n      nodes.push(createHtmlFragment('</div>'))\n    })\n    nodes.push(createHtmlFragment('</div>'))\n    nodes.push(createHtmlFragment('</div>'))\n    parent.blocks.push(...nodes)\n  })\n}\n\nmodule.exports.register = (registry, context) => {\n  registry.block('tabs', tabsBlock)\n}\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"kubernetes-tutorial-site\",\n  \"description\": \"Kubernetes Tutorial Documentation\",\n  \"homepage\": \"https://redhat-scholars.github.io/kubernetes-tutorial\",\n  \"author\": {\n    \"email\": \"kamesh.sampath@hotmail.com\",\n    \"name\": \"Kamesh Sampath\",\n    \"url\": \"https://twitter.com/@kamesh_sampath\"\n  },\n  \"dependencies\": {\n    \"@antora/cli\": \"2.3.1\",\n    \"@antora/site-generator-default\": \"2.3.1\",\n    \"@babel/cli\": \"^7.5.5\",\n    \"@babel/core\": \"^7.5.5\",\n    \"@babel/polyfill\": \"^7.4.4\",\n    \"@babel/preset-env\": \"^7.5.5\",\n    \"@babel/register\": \"^7.5.5\",\n    \"browser-sync\": \"^2.26.7\",\n    \"fs-extra\": \"^8.1.0\",\n    \"gulp\": \"^4.0.0\",\n    \"yaml-js\": \"^0.2.3\"\n  },\n  \"devDependencies\": {},\n  \"scripts\": {\n    \"dev\": \"gulp\",\n    \"clean\": \"gulp clean\",\n    \"workshop\": \"gulp workshopSite\"\n  },\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"git+https://github.com/redhat-scholars/kubernetes-tutorial.git\"\n  },\n  \"license\": \"Apache-2.0\",\n  \"babel\": {\n    \"presets\": [\n      \"@babel/preset-env\"\n    ]\n  }\n}\n"
  },
  {
    "path": "scripts/create-kubeconfig.sh",
    "content": "#!/bin/bash\n\nset -euo pipefail\n\ndeclare MINIKUBE_HOST=${1:-192.168.86.48}\n\n# private key to be used to authenticate with MINIKUBE_HOST for USER\ndeclare KEYFILE_PATH=${2:-${HOME}/.ssh/emu-fedora}\n\n# user on the minikube host\ndeclare USER=${3:-mwh}\n\ndeclare MINIKUBE_PROFILE_NAME=${4:-minikube}\n\n# as the remote server, assume we want the kubeconfig at the exported KUBECONFIG location\ndeclare REMOTE_KUBECONFIG_PATH=${KUBECONFIG}\ndeclare CERTS_DIR=\"$DEMO_HOME/$CONFIG_SUBDIR/certs\"\n\n\nif [[ -f $REMOTE_KUBECONFIG_PATH ]]; then\n    echo \"Removing old config file at ${REMOTE_KUBECONFIG_PATH}\"\n    rm ${REMOTE_KUBECONFIG_PATH}\nfi\n\nif [[ ! -d ${CERTS_DIR} ]]; then\n    echo \"Creating certs dir ${CERTS_DIR}\"\n    mkdir -p ${CERTS_DIR}\nfi\n\n# Assume this is run right after minikube is setup on host machine for user ${USER}\nif [[ $MINIKUBE_HOST == \"localhost\" ]]; then\n    cp ~/.kube/config ${REMOTE_KUBECONFIG_PATH}\nelse\n    scp -i ${KEYFILE_PATH} ${USER}@${MINIKUBE_HOST}:~/.kube/config ${REMOTE_KUBECONFIG_PATH}\nfi\n\n# find the host directory for certs.  
For kubectl config documentation and examples, see\n# this site: https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration\nCA_CRT=$(kubectl config view -o jsonpath=\"{.clusters[?(@.name == \\\"${MINIKUBE_PROFILE_NAME}\\\")].cluster.certificate-authority}\" --kubeconfig=${REMOTE_KUBECONFIG_PATH})\nkubectl config set clusters.${MINIKUBE_PROFILE_NAME}.certificate-authority \"${CERTS_DIR}/$(basename ${CA_CRT})\" --kubeconfig=${REMOTE_KUBECONFIG_PATH}\nCLIENT_CRT=$(kubectl config view -o jsonpath=\"{.users[?(@.name == \\\"${MINIKUBE_PROFILE_NAME}\\\")].user.client-certificate}\" --kubeconfig=${REMOTE_KUBECONFIG_PATH})\nkubectl config set users.${MINIKUBE_PROFILE_NAME}.client-certificate \"${CERTS_DIR}/$(basename ${CLIENT_CRT})\" --kubeconfig=${REMOTE_KUBECONFIG_PATH}\nCLIENT_KEY=$(kubectl config view -o jsonpath=\"{.users[?(@.name == \\\"${MINIKUBE_PROFILE_NAME}\\\")].user.client-key}\" --kubeconfig=${REMOTE_KUBECONFIG_PATH})\nkubectl config set users.${MINIKUBE_PROFILE_NAME}.client-key \"${CERTS_DIR}/$(basename ${CLIENT_KEY})\" --kubeconfig=${REMOTE_KUBECONFIG_PATH}\n\ndeclare FILES=( ${CA_CRT} ${CLIENT_CRT} ${CLIENT_KEY} )\nfor HOST_FILE_PATH in ${FILES[@]}; do\n    FILE_NAME=$(basename ${HOST_FILE_PATH})\n    REMOTE_CERT_FILE_PATH=\"${CERTS_DIR}/${FILE_NAME}\"\n    \n    if [[ $MINIKUBE_HOST == \"localhost\" ]]; then\n        LOCAL_CERT_FILE_PATH=~/.minikube/profiles/${MINIKUBE_PROFILE_NAME}/${FILE_NAME}\n        if [[ ! 
-f \"$LOCAL_CERT_FILE_PATH\" ]]; then\n            LOCAL_CERT_FILE_PATH=~/.minikube/${FILE_NAME}\n        fi       \n        \n        echo \"Copying ${FILE_NAME} from ${LOCAL_CERT_FILE_PATH} to ${REMOTE_CERT_FILE_PATH}.\"\n    \n        cp \"${LOCAL_CERT_FILE_PATH}\" \"${REMOTE_CERT_FILE_PATH}\"\n    else\n        echo \"Copying ${FILE_NAME} from ${MINIKUBE_HOST}:${HOST_FILE_PATH} to ${REMOTE_CERT_FILE_PATH}.\"\n\n        scp -i ${KEYFILE_PATH} ${USER}@${MINIKUBE_HOST}:${HOST_FILE_PATH} ${REMOTE_CERT_FILE_PATH}\n    fi\n    \ndone\n\n\n\n# Reset the server on the config to the current host\nif [[ ${MINIKUBE_HOST} != \"localhost\" ]]; then\n    kubectl config set clusters.${MINIKUBE_PROFILE_NAME}.server \"https://${MINIKUBE_HOST}:8443\" --kubeconfig=${REMOTE_KUBECONFIG_PATH}  \nfi"
  },
  {
    "path": "scripts/github-pages-publish.sh",
    "content": "#!/bin/bash\n\nset -euo pipefail\n\ndeclare SITE=${1:-github-pages-stage.yml}\ndeclare REPO=${2:-$(git remote get-url origin)}\ndeclare BRANCH=\"gh-pages\"\n\necho \"Removing old publish directory\"\nif [[ -d $DEMO_HOME/gh-publish ]]; then\n    rm -rf $DEMO_HOME/gh-publish \nfi\n\necho \"Removing antora cache directory\"\nif [[ -d $DEMO_HOME/.cache ]]; then\n    rm -rf $DEMO_HOME/.cache \nfi\n\ngit clone -b ${BRANCH} ${REPO} $DEMO_HOME/gh-publish\n\necho \"Generating the site documentation from ${SITE}\"\n\nantora generate --stacktrace $DEMO_HOME/${SITE} --to-dir $DEMO_HOME/gh-publish\n\necho \"Pushing site to ${BRANCH} branch of ${REPO}\"\ncd $DEMO_HOME/gh-publish\ngit add --all .\ngit commit -m\"Automated Publish\" \ngit push origin\n\necho \"Site published successfully!\""
  },
  {
    "path": "scripts/minikube-server-setup.sh",
    "content": "set -euo pipefail\n\ndeclare MINIKUBE_PROFILE=${1:-devnation}\ndeclare MINIKUBE_IP=${2:-$(hostname -I | awk '{print $1}')}\ndeclare MINIKUBE_MEM=${3:-4096}\ndeclare MINIKUBE_CPU=${4:-2}\ndeclare DRIVER=${5:-kvm2}  # for MacOS you might want to use hyperkit or virtualbox\n\nminikube start --memory=${MINIKUBE_MEM} --cpus=${MINIKUBE_CPU} --driver=${DRIVER} -p ${MINIKUBE_PROFILE} --apiserver-ips=${MINIKUBE_IP}\n\nminikube config set profile ${MINIKUBE_PROFILE}\n\nIPTABLE_RULES=('LIBVIRT_FWI' 'LIBVIRT_FWO')\nfor RULE in \"${IPTABLE_RULES[@]}\"; do\n    declare RULE_INDEX=$(sudo iptables -L ${RULE} --line-numbers 2>/dev/null | grep REJECT | awk '{print $1}' | head -n 1)\n    while [[ -n \"${RULE_INDEX}\" ]]; do\n        echo \"Deleting from rule: ${RULE} index: ${RULE_INDEX}> $(sudo iptables -L ${RULE} ${RULE_INDEX})\"\n        sudo iptables -D ${RULE} ${RULE_INDEX}\n        # see if there are any other reject indeces\n        RULE_INDEX=$(sudo iptables -L ${RULE} --line-numbers 2>/dev/null | grep REJECT | awk '{print $1}' | head -n 1)\n        # echo \"New Rule Index is $RULE_INDEX\"\n    done\ndone\n\n# turn on DNAT\nsudo iptables -t nat -A PREROUTING -p tcp --dport 30000:32767 -j DNAT --to-destination $(minikube ip):30000-32767\nsudo iptables -t nat -A PREROUTING -p tcp --dport 8443 -j DNAT --to-destination $(minikube ip):8443\n\n# This can be used to restore errors in the above script removing rules\n# sudo iptables -A LIBVIRT_FWI -d 192.168.122.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT\n# sudo iptables -A LIBVIRT_FWO -s 192.168.122.0/24 -i virbr0 -j ACCEPT\n\n# turn off firewall\nsudo systemctl disable firewalld;\n\n"
  },
  {
    "path": "scripts/pod-node-columns-template.txt",
    "content": "NAME                       READY   STATUS    RESTARTS   AGE   IP       NODE     NOMINATED NODE  READINESS GATES\nmyboot2-7c5f46cbc9-hwm2v   0/1     Pending   0          31s   <none>   <none>   <none>  <none>"
  },
  {
    "path": "scripts/shell-setup.sh",
    "content": "#!/bin/bash\n\n# per the following $0 doesn't work reliably when the script is sourced:\n# https://stackoverflow.com/questions/35006457/choosing-between-0-and-bash-source.  But \n# in some cases I've found BASH_SOURCE hasn't been set correctly.\ndeclare SCRIPT=$0\nif [[ \"$SCRIPT\" == \"/bin/bash\" ]]; then\n    SCRIPT=\"${BASH_SOURCE}\"\nfi\n\nif [[ -z \"${SCRIPT}\" ]]; then\n    echo \"BASH_SOURCE: ${BASH_SOURCE}, 0 is: $0\"\n    echo \"Failed to find the running name of the script, you need to set DEMO_HOME manually\"\nfi\n\nexport DEMO_HOME=$( cd \"$(dirname \"${SCRIPT}\")/..\" ; pwd -P )\n\necho \"Welcome to kubernetes tutorial\""
  },
  {
    "path": "supplemental-ui/partials/header-content.hbs",
    "content": "<header class=\"header\">\n  <style>\n    /* override master stylesheet properties for this partial here */\n\n    .navbar-item {\n      flex: unset;\n    }\n\n    .doc {\n      max-width: unset;\n    }\n\n\n  </style>\n\n  <nav class=\"navbar\">\n    <div class=\"navbar-brand\">\n      <a class=\"navbar-item\" href=\"https://developers.redhat.com\" target=\"_blank\"><img\n          src=\"{{uiRootPath}}/img/header_logo.png\" height=\"40px\" alt=\"Red Hat Developer Program\"></a>\n      <a class=\"navbar-item\" style=\"font-size: 24px; color: white\" href=\"{{{or site.url (or siteRootUrl siteRootPath)}}}\">{{site.title}}</a>\n      <button class=\"navbar-burger\" data-target=\"topbar-nav\">\n        <span></span>\n        <span></span>\n        <span></span>\n      </button>\n    </div>\n        <div id=\"topbar-nav\" class=\"navbar-menu\">\n      <div class=\"navbar-end\">\n        <a class=\"navbar-item\" href=\"https://developers.redhat.com/ebooks/\" target=\"_blank\">Books</a>\n        <a class=\"navbar-item\" href=\"https://developers.redhat.com/cheatsheets/\" target=\"_blank\">Cheat Sheets</a>\n        <a class=\"navbar-item\" href=\"https://developers.redhat.com/events/\" target=\"_blank\">Upcoming Events</a>\n        <div class=\"navbar-item has-dropdown is-hoverable\">\n          <a class=\"navbar-link\" href=\"#\">More Tutorials</a>\n          <div class=\"navbar-dropdown\">\n            <a class=\"navbar-item\" href=\"https://redhat-scholars.github.io/outer-loop-guide/\" target=\"_blank\">Outer Loop</a>\n            <a class=\"navbar-item\" href=\"https://redhat-scholars.github.io/openshift-starter-guides/\" target=\"_blank\">OpenShift Starter</a>\n            <a class=\"navbar-item\" href=\"https://redhat-scholars.github.io/kubernetes-tutorial/\" target=\"_blank\">Kubernetes</a>\n            <a class=\"navbar-item\" href=\"https://redhat-scholars.github.io/istio-tutorial/\" target=\"_blank\">Istio</a>\n            <a class=\"navbar-item\" 
href=\"https://redhat-developer-demos.github.io/quarkus-tutorial/\" target=\"_blank\">Quarkus</a>\n            <a class=\"navbar-item\" href=\"https://redhat-developer-demos.github.io/knative-tutorial/\" target=\"_blank\">Knative</a>\n            <a class=\"navbar-item\" href=\"https://redhat-scholars.github.io/tekton-tutorial/\" target=\"_blank\">Tekton</a>\n          </div>\n        </div>\n      </div>\n    </div>\n  </nav>\n</header>"
  },
  {
    "path": "supplemental-ui/partials/nav-explore.hbs",
    "content": "<div class=\"nav-panel-explore{{#unless page.navigation}} is-active{{/unless}}\" {{#if page.attributes.hide-versions-component}} style=\"display: none;\" {{/if}} data-panel=\"explore\">\n  {{#if page.component}}\n  <div class=\"context\">\n    <span class=\"title\">{{page.component.title}}</span>\n    <span class=\"version\">{{page.componentVersion.displayVersion}}</span>\n  </div>\n  {{/if}}\n  <ul class=\"components\">\n    {{#each site.components}}\n    <li class=\"component{{#if (eq this @root.page.component)}} is-current{{/if}}\">\n      <span class=\"title\">{{{./title}}}</span>\n      <ul class=\"versions\">\n        {{#each ./versions}}\n        <li class=\"version\n          {{~#if (and (eq .. @root.page.component) (eq this @root.page.componentVersion))}} is-current{{/if~}}\n          {{~#if (eq this ../latestVersion)}} is-latest{{/if}}\">\n          <a href=\"{{{relativize ./url}}}\">{{./displayVersion}}</a>\n        </li>\n        {{/each}}\n      </ul>\n    </li>\n    {{/each}}\n  </ul>\n</div>"
  },
  {
    "path": "supplemental-ui/partials/nav-menu.hbs",
    "content": "{{#with page.navigation}}\n<div class=\"nav-panel-menu is-active\" data-panel=\"menu\">\n  <nav class=\"nav-menu\">\n{{> nav-tree navigation=this}}\n  </nav>\n</div>\n{{/with}}\n"
  },
  {
    "path": "supplemental-ui/partials/nav.hbs",
    "content": "<div class=\"nav-container\"{{#if page.component}} data-component=\"{{page.component.name}}\" data-version=\"{{page.version}}\"{{/if}}>\n  <aside class=\"nav\">\n    <div class=\"panels\">\n{{> nav-menu}}\n{{> nav-explore}}\n    </div>\n  </aside>\n</div>\n"
  },
  {
    "path": "vscode-asciidoc-extra.json",
    "content": "{\n    \"Add Tabs\": {\n        \"prefix\": \"tabs\",\n        \"body\": [\n            \"[tabs]\",\n            \"====\",\n            \"${1:tab1}::\",\n            \"+\",\n            \"--\",\n            \"--\",\n            \"${2:tab2}::\",\n            \"+\",\n            \"--\",\n            \"--\",\n            \"====\"\n        ],\n        \"description\": \"Add Tabs macro\"\n    },\n    \"Add clipboard\": {\n        \"prefix\": \"clipboard\",\n        \"body\": [\n            \"[#${1:clipboardid}]\",\n            \"[source,${2:bash},subs=\\\"+macros,+attributes\\\"]\",\n            \"----\",\n            \"${3}\",\n            \"----\",\n            \"copyToClipboard::$1[]\"\n        ],\n        \"description\": \"Add Source with Clipboard\"\n    },\n    \"Add Navigation\": {\n        \"prefix\": \"nav\",\n        \"body\": [\n            \"${1:*} xref:${2:page.adoc}[${3:Nav Title}]\"\n        ],\n        \"description\": \"Add new navigation\"\n    }\n}"
  }
]