[
  {
    "path": ".github/workflows/automated-build.yaml",
    "content": "name: Automated Build\n\non:\n  push:\n    branches:\n      - main\n\nenv:\n  DOCKER_BUILDKIT: 1\n\n# Note: this is copy-pasted and adapted from\n# https://github.com/jpetazzo/workflows/blob/main/.github/workflows/automated-build.yaml\n# I need to find an elegant way to manage the multi-target built 🤔\n\njobs:\n  push:\n\n    runs-on: ubuntu-latest\n    if: github.event_name == 'push'\n\n    permissions:\n      contents: read\n      packages: write\n\n    steps:\n      -\n        name: Set environment variables\n        run: |\n          IMAGES=\"\"\n          if [ \"${{ secrets.DOCKER_HUB_TOKEN }}\" ]; then\n            echo PUSH_TO_DOCKER_HUB=yes >> $GITHUB_ENV\n            IMAGES=\"$IMAGES docker.io/${{ github.repository }}\"\n            if [ \"${{ inputs.DOCKER_HUB_USERNAME }}\" ]; then\n              echo DOCKER_HUB_USERNAME=\"${{ inputs.DOCKER_HUB_USERNAME }}\" >> $GITHUB_ENV\n            else\n              echo DOCKER_HUB_USERNAME=\"${{ github.repository_owner }}\" >> $GITHUB_ENV\n            fi\n          fi\n          if true; then\n            echo PUSH_TO_GHCR=yes >> $GITHUB_ENV\n            IMAGES=\"$IMAGES ghcr.io/${{ github.repository }}\"\n          fi\n          echo 'IMAGES<<EOF' >> $GITHUB_ENV\n          for IMAGE in $IMAGES; do\n            echo $IMAGE >> $GITHUB_ENV\n            if [ \"$GITHUB_REF_TYPE\" == \"tag\" ]; then\n              echo $IMAGE:$GITHUB_REF_NAME >> $GITHUB_ENV\n            fi\n          done\n          echo 'EOF' >> $GITHUB_ENV\n\n      -\n        uses: actions/checkout@v3\n\n      -\n        name: Log into Docker Hub\n        if: env.PUSH_TO_DOCKER_HUB\n        uses: docker/login-action@v2\n        with:\n          username: ${{ env.DOCKER_HUB_USERNAME }}\n          password: ${{ secrets.DOCKER_HUB_TOKEN }}\n\n      -\n        name: Log into GitHub Container Registry\n        if: env.PUSH_TO_GHCR\n        uses: docker/login-action@v2\n        with:\n          registry: ghcr.io\n          username: ${{ github.actor }}\n          password: ${{ github.token }}\n\n      -\n        uses: docker/setup-qemu-action@v2\n\n      -\n        uses: docker/setup-buildx-action@v2\n\n      -\n        uses: docker/build-push-action@v3\n        with:\n          platforms: ${{ inputs.PLATFORMS }}\n          push: true\n          tags: ${{ env.IMAGES }}\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n\n      -\n        uses: docker/build-push-action@v3\n        with:\n          platforms: ${{ inputs.PLATFORMS }}\n          push: true\n          target: vspod\n          tags: jpetazzo/shpod:vspod,ghcr.io/jpetazzo/shpod:vspod\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n"
  },
  {
    "path": ".gitignore",
    "content": "/build\n"
  },
  {
    "path": "Brewfile.netlify",
    "content": "brew \"helm\"\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM --platform=$BUILDPLATFORM golang:alpine AS builder\nRUN apk add curl git make\nARG BUILDARCH TARGETARCH\nENV BUILDARCH=$BUILDARCH \\\n    CGO_ENABLED=0 \\\n    GOARCH=$TARGETARCH \\\n    TARGETARCH=$TARGETARCH\nCOPY helper-* /bin/\n\nFROM alpine AS addmount\nRUN apk add build-base\nCOPY addmount.c .\nRUN make addmount\n\n# https://github.com/argoproj/argo-cd/releases/latest\nFROM builder AS argocd\nRUN helper-curl bin argocd \\\n    https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-@GOARCH\n\n# https://github.com/warpstreamlabs/bento/releases\nFROM builder AS bento\nARG BENTO_VERSION=1.3.0\nRUN helper-curl tar bento \\\n    https://github.com/warpstreamlabs/bento/releases/download/v${BENTO_VERSION}/bento_${BENTO_VERSION}_linux_@GOARCH.tar.gz\n\n# https://github.com/coder/code-server/releases\nFROM builder AS code-server\nARG CODE_SERVER_VERSION=4.105.1\nRUN mkdir -p /code-server\nRUN helper-curl tar \"--directory=/code-server --strip-components=1\" \\\n    https://github.com/coder/code-server/releases/download/v${CODE_SERVER_VERSION}/code-server-${CODE_SERVER_VERSION}-linux-@CODERARCH.tar.gz\n\n# https://github.com/docker/compose/releases\nFROM builder AS compose\nARG COMPOSE_VERSION=2.40.1\nRUN helper-curl bin docker-compose \\\n    https://github.com/docker/compose/releases/download/v${COMPOSE_VERSION}/docker-compose-linux-@UARCH\n\n# https://github.com/google/go-containerregistry/tree/main/cmd/crane\nFROM builder AS crane\nRUN go install github.com/google/go-containerregistry/cmd/crane@latest\nRUN cp $(find bin -name crane) /usr/local/bin\n\n# https://github.com/fluxcd/flux2/releases\nFROM builder AS flux\nARG FLUX_VERSION=2.7.2\nRUN helper-curl tar flux \\\n    https://github.com/fluxcd/flux2/releases/download/v$FLUX_VERSION/flux_${FLUX_VERSION}_linux_@GOARCH.tar.gz\n\n# https://github.com/tomnomnom/gron/releases\nFROM builder AS gron\nARG GRON_VERSION=v0.7.1\nRUN go install \"-ldflags=-X main.gronVersion=$GRON_VERSION\" github.com/tomnomnom/gron@$GRON_VERSION\nRUN cp $(find bin -name gron) /usr/local/bin\n\n# https://github.com/helmfile/helmfile/releases\nFROM builder AS helmfile\nARG HELMFILE_VERSION=1.1.7\nRUN helper-curl tar helmfile \\\n    https://github.com/helmfile/helmfile/releases/download/v${HELMFILE_VERSION}/helmfile_${HELMFILE_VERSION}_linux_@GOARCH.tar.gz\n\n# https://github.com/helm/helm/releases\nFROM builder AS helm\nARG HELM_VERSION=3.19.0\nRUN helper-curl tar \"--strip-components=1 linux-@GOARCH/helm\" \\\n    https://get.helm.sh/helm-v${HELM_VERSION}-linux-@GOARCH.tar.gz\n\n# Use emulation instead of cross-compilation for that one.\n# (The source is small enough, so I don't know if cross-compilation\n# would be worth the effort.)\nFROM alpine AS httping\nRUN apk add build-base cmake gettext git musl-libintl ncurses-dev openssl-dev\nRUN git clone https://github.com/folkertvanheusden/httping\nWORKDIR httping\nRUN sed -i s/60/0/ utils.c\n#RUN echo \"target_link_options(httping PUBLIC -static)\" >> CMakeLists.txt\nRUN cmake .\nRUN make install BINDIR=/usr/local/bin\n\n# https://github.com/simeji/jid/releases\nFROM builder AS jid\nARG JID_VERSION=0.7.6\nRUN go install github.com/simeji/jid/cmd/jid@v$JID_VERSION\nRUN cp $(find bin -name jid) /usr/local/bin\n\n# https://github.com/derailed/k9s/releases\nFROM builder AS k9s\nRUN helper-curl tar k9s \\\n    https://github.com/derailed/k9s/releases/latest/download/k9s_Linux_@GOARCH.tar.gz\n\n# https://github.com/kubernetes-sigs/kind/releases\nFROM builder AS kind\nARG 
KIND_VERSION=v0.30.0\nRUN helper-curl bin kind \\\n    https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-@GOARCH\n\n# https://github.com/kubernetes/kompose/releases\nFROM builder AS kompose\nRUN helper-curl bin kompose \\\n    https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-@GOARCH\n\n# https://github.com/kubecolor/kubecolor/releases\nFROM builder AS kubecolor\nARG KUBECOLOR_VERSION=0.5.2\nRUN helper-curl tar kubecolor \\\n    https://github.com/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_@GOARCH.tar.gz\n\n# https://github.com/kubernetes/kubernetes/releases\nFROM builder AS kubectl\nARG KUBECTL_VERSION=1.34.1\nRUN helper-curl tar \"--strip-components=3 kubernetes/client/bin/kubectl\" \\\n    https://dl.k8s.io/v${KUBECTL_VERSION}/kubernetes-client-linux-@GOARCH.tar.gz\n\n# https://github.com/stackrox/kube-linter/releases\nFROM builder AS kube-linter\nARG KUBELINTER_VERSION=v0.7.6\nRUN go install golang.stackrox.io/kube-linter/cmd/kube-linter@$KUBELINTER_VERSION\nRUN cp $(find bin -name kube-linter) /usr/local/bin\n\n# https://github.com/doitintl/kube-no-trouble/releases\nFROM builder AS kubent\nARG KUBENT_VERSION=0.7.2\nRUN helper-curl tar kubent \\\n    https://github.com/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-@GOARCH.tar.gz\n\n# https://github.com/bitnami-labs/sealed-secrets/releases\nFROM builder AS kubeseal\nARG KUBESEAL_VERSION=0.32.2\nRUN helper-curl tar kubeseal \\\n    https://github.com/bitnami-labs/sealed-secrets/releases/download/v$KUBESEAL_VERSION/kubeseal-$KUBESEAL_VERSION-linux-@GOARCH.tar.gz\n\n# https://github.com/kubernetes-sigs/kustomize/releases\nFROM builder AS kustomize\nARG KUSTOMIZE_VERSION=5.8.1\nRUN helper-curl tar kustomize \\\n    https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v$KUSTOMIZE_VERSION/kustomize_v${KUSTOMIZE_VERSION}_linux_@GOARCH.tar.gz\n\n# https://github.com/kubernetes/minikube/releases\nFROM builder AS minikube\nARG MINIKUBE_VERSION=v1.37.0\nRUN git clone https://github.com/kubernetes/minikube --depth=1 --branch $MINIKUBE_VERSION\nWORKDIR minikube\nRUN make\nRUN cp out/minikube /usr/local/bin/minikube\n\n# https://ngrok.com/download\nFROM builder AS ngrok\nRUN helper-curl tar ngrok \\\n    https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-@GOARCH.tgz\n\n# https://github.com/derailed/popeye/releases\nFROM builder AS popeye\nRUN helper-curl tar popeye \\\n    https://github.com/derailed/popeye/releases/latest/download/popeye_linux_@GOARCH.tar.gz\n\n# https://github.com/regclient/regclient/releases\nFROM builder AS regctl\nARG REGCLIENT_VERSION=0.9.2\nRUN helper-curl bin regctl \\\n    https://github.com/regclient/regclient/releases/download/v$REGCLIENT_VERSION/regctl-linux-@GOARCH\n\n# https://github.com/GoogleContainerTools/skaffold/releases\nFROM builder AS skaffold\nRUN helper-curl bin skaffold \\\n    https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-@GOARCH\n\n# https://github.com/stern/stern/releases\nFROM builder AS stern\nARG STERN_VERSION=1.33.0\nRUN helper-curl tar stern \\\n    https://github.com/stern/stern/releases/download/v${STERN_VERSION}/stern_${STERN_VERSION}_linux_@GOARCH.tar.gz\n\n# https://github.com/tilt-dev/tilt/releases\nFROM builder AS tilt\nARG TILT_VERSION=0.35.2\nRUN helper-curl tar tilt \\\n    
https://github.com/tilt-dev/tilt/releases/download/v${TILT_VERSION}/tilt.${TILT_VERSION}.linux-alpine.@WTFARCH.tar.gz\n\n# https://github.com/vmware-tanzu/velero/releases\nFROM builder AS velero\nARG VELERO_VERSION=1.17.0\nRUN helper-curl tar \"--strip-components=1 velero-v${VELERO_VERSION}-linux-@GOARCH/velero\" \\\n    https://github.com/vmware-tanzu/velero/releases/download/v${VELERO_VERSION}/velero-v${VELERO_VERSION}-linux-@GOARCH.tar.gz\n\n# https://github.com/carvel-dev/ytt/releases\nFROM builder AS ytt\nARG YTT_VERSION=0.52.1\nRUN helper-curl bin ytt \\\n    https://github.com/carvel-dev/ytt/releases/download/v${YTT_VERSION}/ytt-linux-@GOARCH\n\n# https://github.com/carvel-dev/kapp/releases\nFROM builder AS kapp\nARG YTT_VERSION=0.64.2\nRUN helper-curl bin kapp \\\n    https://github.com/carvel-dev/kapp/releases/download/v${YTT_VERSION}/kapp-linux-@GOARCH\n\nFROM alpine AS shpod\nENV COMPLETIONS=/usr/share/bash-completion/completions\nRUN apk add --no-cache apache2-utils bash bash-completion curl docker-cli docker-cli-compose docker-cli-buildx docker-engine file fzf gettext git iptables-legacy iputils jq libintl ncurses openssh openssl screen socat sudo tmux tree unzip vim yq\n\nCOPY --from=addmount    /addmount                     /usr/local/bin\nCOPY --from=argocd      /usr/local/bin/argocd         /usr/local/bin\nCOPY --from=bento       /usr/local/bin/bento          /usr/local/bin\nCOPY --from=compose     /usr/local/bin/docker-compose /usr/local/bin\nCOPY --from=crane       /usr/local/bin/crane          /usr/local/bin\nCOPY --from=flux        /usr/local/bin/flux           /usr/local/bin\nCOPY --from=gron        /usr/local/bin/gron           /usr/local/bin\nCOPY --from=helm        /usr/local/bin/helm           /usr/local/bin\nCOPY --from=helmfile    /usr/local/bin/helmfile       /usr/local/bin\nCOPY --from=httping     /usr/local/bin/httping        /usr/local/bin\nCOPY --from=jid         /usr/local/bin/jid            /usr/local/bin\nCOPY --from=k9s         /usr/local/bin/k9s            /usr/local/bin\nCOPY --from=kind        /usr/local/bin/kind           /usr/local/bin\nCOPY --from=kapp        /usr/local/bin/kapp           /usr/local/bin\nCOPY --from=kubectl     /usr/local/bin/kubectl        /usr/local/bin\nCOPY --from=kubecolor   /usr/local/bin/kubecolor      /usr/local/bin\nCOPY --from=kube-linter /usr/local/bin/kube-linter    /usr/local/bin\nCOPY --from=kubent      /usr/local/bin/kubent         /usr/local/bin\nCOPY --from=kubeseal    /usr/local/bin/kubeseal       /usr/local/bin\nCOPY --from=kustomize   /usr/local/bin/kustomize      /usr/local/bin\nCOPY --from=minikube    /usr/local/bin/minikube       /usr/local/bin\nCOPY --from=ngrok       /usr/local/bin/ngrok          /usr/local/bin\nCOPY --from=popeye      /usr/local/bin/popeye         /usr/local/bin\nCOPY --from=regctl      /usr/local/bin/regctl         /usr/local/bin\nCOPY --from=skaffold    /usr/local/bin/skaffold       /usr/local/bin\nCOPY --from=stern       /usr/local/bin/stern          /usr/local/bin\nCOPY --from=tilt        /usr/local/bin/tilt           /usr/local/bin\nCOPY --from=velero      /usr/local/bin/velero         /usr/local/bin\nCOPY --from=ytt         /usr/local/bin/ytt            /usr/local/bin\n\nRUN set -e ; for BIN in \\\n    argocd \\\n    crane \\\n    flux \\\n    helm \\\n    helmfile \\\n    kapp \\\n    kind \\\n    kubectl \\\n    kube-linter \\\n    kustomize \\\n    minikube \\\n    regctl \\\n    skaffold \\\n    tilt \\\n    velero \\\n    ytt \\\n    ; do echo $BIN ; $BIN completion bash > 
$COMPLETIONS/$BIN.bash ; done ;\\\n    stern --completion bash > $COMPLETIONS/stern\n\nRUN cd /tmp \\\n && git clone https://github.com/ahmetb/kubectx \\\n && cd kubectx \\\n && mv kubectx /usr/local/bin/kctx \\\n && mv kubens /usr/local/bin/kns \\\n && mv completion/kubectx.bash $COMPLETIONS/kctx.bash \\\n && mv completion/kubens.bash $COMPLETIONS/kns.bash \\\n && cd .. \\\n && rm -rf kubectx\nRUN cd /tmp \\\n && git clone https://github.com/jonmosco/kube-ps1 \\\n && cp kube-ps1/kube-ps1.sh /etc/bash/ \\\n && rm -rf kube-ps1\n\n# Create user and finalize setup.\n\nRUN echo k8s:x:1000: >> /etc/group \\\n && echo k8s:x:1000:1000::/home/k8s:/bin/bash >> /etc/passwd \\\n && sed -i 's/^docker:.*:$/\\0k8s/' /etc/group \\\n && echo \"k8s ALL=(ALL) NOPASSWD: ALL\" > /etc/sudoers.d/k8s \\\n && mkdir /home/k8s \\\n && chown -R k8s:k8s /home/k8s/ \\\n && sed -i 's/#MaxAuthTries 6/MaxAuthTries 42/' /etc/ssh/sshd_config \\\n && sed -i 's/AllowTcpForwarding no/AllowTcpForwarding yes/' /etc/ssh/sshd_config\nARG TARGETARCH\nRUN \\\n if [ \"$TARGETARCH\" != \"386\" ]; then \\\n mkdir /tmp/krew \\\n && cd /tmp/krew \\\n && curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_$TARGETARCH.tar.gz | tar -zxf- \\\n && sudo -u k8s -H ./krew-linux_$TARGETARCH install krew \\\n && cd \\\n && rm -rf /tmp/krew \\\n ; fi\nCOPY --chown=1000:1000 bashrc /home/k8s/.bashrc\nCOPY --chown=1000:1000 bash_profile /home/k8s/.bash_profile\nCOPY --chown=1000:1000 vimrc /home/k8s/.vimrc\nCOPY --chown=1000:1000 tmux.conf /home/k8s/.tmux.conf\nCOPY motd /etc/motd\nCOPY setup-tailhist.sh /usr/local/bin\nCOPY docker-socket.sh /usr/local/bin\nCOPY dind.sh /usr/local/bin\nCOPY kind.sh /usr/local/bin\nCOPY bore.sh /usr/local/bin\nVOLUME /var/lib/docker\n\n# Generate a list of all installed versions.\nRUN ( \\\n    ab -V | head -n1 ;\\\n    argocd version --client | head -n1 ;\\\n    echo \"bento $(bento --version | head -n1)\" ;\\\n    bash --version | head -n1 ;\\\n    curl --version | head -n1 ;\\\n    docker version --format=\"Docker {{.Client.Version}}\" ;\\\n    envsubst --version | head -n1 ;\\\n    flux --version ;\\\n    gron --version ;\\\n    git --version ;\\\n    jq --version ;\\\n    ssh -V ;\\\n    tmux -V ;\\\n    yq --version ;\\\n    docker-compose version ;\\\n    echo \"crane $(crane version)\" ;\\\n    echo \"Helm $(helm version --short)\" ;\\\n    echo \"Helmfile $(helmfile version -o=short | head -n1)\" ;\\\n    httping --version ;\\\n    jid --version ;\\\n    echo \"k9s $(k9s version | grep Version)\" ;\\\n    kind version ;\\\n    kapp --version | head -n1 ;\\\n    echo \"kubecolor $(kubecolor --kubecolor-version)\" ;\\\n    echo \"kubectl $(kubectl version --client | head -n1)\" ;\\\n    echo \"kube-linter $(kube-linter version)\" ;\\\n    echo \"kubent $(kubent --version 2>&1)\" ;\\\n    kubeseal --version ;\\\n    echo \"kustomize $(kustomize version | head -n1)\" ;\\\n    minikube version | head -n1 ;\\\n    ngrok version ;\\\n    echo \"popeye $(popeye version | grep Version)\" ;\\\n    echo \"regctl $(regctl version --format={{.VCSTag}})\" ;\\\n    echo \"skaffold $(skaffold version)\" ;\\\n    echo \"stern $(stern --version | grep ^version)\" ;\\\n    echo \"tilt $(tilt version)\" ;\\\n    echo \"velero $(velero version --client-only | grep Version)\" ;\\\n    ) > versions.txt\n\nCOPY init.sh /\nCMD [\"/init.sh\"]\nEXPOSE 22/tcp\nENV GENERATE_PASSWORD_LENGTH=20\n\nFROM node:20-slim AS nodejslibs\nWORKDIR /output\nRUN for LINKER in /lib64/ld-linux-x86-64.so.2 
/lib/ld-linux-aarch64.so.1 /lib/ld-linux-armhf.so.3; do \\\n      if [ -f \"$LINKER\" ]; then \\\n        install -D \"$LINKER\" \"./$LINKER\" ;\\\n      fi ;\\\n    done\nRUN mkdir -p lib\nRUN for LIBDIR in x86_64-linux-gnu aarch64-linux-gnu arm-linux-gnueabihf; do \\\n      if [ -d \"/lib/$LIBDIR\" ]; then \\\n        cp -a \"/lib/$LIBDIR\" lib ;\\\n      fi ;\\\n    done\n\n# Define an extra build target with \"code-server\" (VScode in the browser) installed\nFROM shpod AS vspod\nCOPY --from=nodejslibs /output /\nCOPY --from=code-server /code-server /opt/code-server\nRUN ln -s /opt/code-server/bin/code-server /usr/local/bin\nRUN sudo -u k8s -H code-server --install-extension ms-azuretools.vscode-docker\nRUN sudo -u k8s -H code-server --install-extension ms-kubernetes-tools.vscode-kubernetes-tools\nCMD sudo -u k8s -H -E code-server --bind-addr 0:1789\nEXPOSE 1789\n\n# Define the default build target\nFROM shpod\n"
  },
  {
    "path": "README.md",
    "content": "# shpod\n\n**⚠️ Please listen carefully, as our ~~menu options~~\ninstallation instructions have changed.**\n\n~~Old instructions: `curl https://shpod.in | sh`~~\n\nNew instructions: use the Helm chart!\n\nTo get a shell in your Kubernetes cluster, with `cluster-admin` privileges:\n\n```bash\nhelm upgrade --install --repo https://shpod.in/ shpod shpod \\\n  --set rbac.cluster.clusterRoles=\"{cluster-admin}\"\nkubectl wait deployment shpod --for=condition=Available\nkubectl exec -ti deployment/shpod -- login -f k8s\n```\n\n## What's this?\n\nShpod (\"Shell in a pod\") is a tool to get a shell session with a ton\nof tools useful when working with containers, Docker, and Kubernetes.\n\nIt's composed of two parts:\n\n- a container image holding all the tools,\n- a Helm chart making it easy to deploy it on Kubernetes.\n\nIts goal is to provide a normalized environment, to go\nwith the training materials at https://container.training/,\nso that you can get all the tools you need regardless\nof your exact Kubernetes setup.\n\n\n## The shpod image\n\nIt's available as `jpetazzo/shpod` or `ghcr.io/jpetazzo/shpod`.\n\nIt's based on Alpine, and includes:\n\n- ab (ApacheBench)\n- bash\n- bento\n- crane\n- curl\n- Docker CLI\n- Docker Compose\n- envsubst\n- fzf\n- git\n- gron\n- Helm\n- jid\n- jq\n- kubectl\n- kubectx + kubens\n- kube-linter\n- kube-ps1\n- kubeseal\n- kustomize\n- ngrok\n- popeye\n- regctl\n- ship\n- skaffold\n- skopeo\n- SSH\n- stern\n- tilt\n- tmux\n- yq\n- ytt\n\nIt also includes completion for most of these tools.\n\nWhen this image starts, it will behave differently depending on whether\nit has a pseudo-terminal or not.\n\nIf it has a pseudo-terminal, it will spawn a shell.\nYou can access that shell by attaching to the container,\nwithout having to bother with networking or password configuration.\nYou can see that mode in action by running one of the following commands:\n\n```bash\ndocker run -ti jpetazzo/shpod\nkubectl run --rm -ti shpod --image jpetazzo/shpod\n```\n\nIf it does not have a pseudo-terminal, it will run an SSH server.\nDepending on the values of some environment variables, it will\nuse a provided password or generate one, or use SSH public key\nauthentication (see below, \"SSH access configuration\").\n\nYou can see that mode in action by running the following command:\n\n```bash\ndocker run jpetazzo/shpod\n```\n\nHowever, that mode will likely be more useful on Kubernetes, for instance:\n```bash\nkubectl create deployment shpod --image jpetazzo/shpod\nkubectl expose deployment shpod --port 22 --type=NodePort\nkubectl logs deployment/shpod\n```\n\nThe last command should show you the password that was generated\nfor the `k8s` user:\n\n```\nGenerating public/private rsa key pair.\nYour identification has been saved in /etc/ssh/ssh_host_rsa_key\nYour public key has been saved in /etc/ssh/ssh_host_rsa_key.pub\nThe key fingerprint is:\nSHA256:xEZav2W/XkJ45KaZvxVLNfudttmVwzvAbd8v/b8jkA0 root@shpod-5965cbcfc9-f5p8m\nThe key's randomart image is:\n+---[RSA 3072]----+\n|        o        |\n|       = .       |\n|      . + . o ...|\n|       o   E =  +|\n|        S . * B+ |\n|           o @o+B|\n|            = =OO|\n|             +o*@|\n|              =B%|\n+----[SHA256]-----+\nEnvironment variable $PASSWORD not found. 
Generating a password.\nPASSWORD=BlVweGRkEf1PQNdrhpjg\nchpasswd: password for 'k8s' changed\nServer listening on 0.0.0.0 port 22.\nServer listening on :: port 22.\n```\n\nIn both cases, you can also access shpod by executing a new shell\nin the existing container.\n\nWith Docker:\n```bash\ndocker exec -ti <container-id> login -f k8s\n```\n\nWith Kubernetes:\n```bash\nkubectl exec -ti deployment/shpod -- login -f k8s\n```\n\n\n## Multi-arch support\n\nShpod supports both Intel and ARM 64-bit architectures. The Dockerfile\nin this repository should be able to support other architectures fairly\neasily. If a given tool isn't available on the target architecture,\na dummy placeholder will be installed instead.\n\n\n## SSH access configuration\n\nThe user is always `k8s` - this is currently hard-coded.\n\nIt is possible to log in either by using a password, or SSH public key\nauthentication.\n\nIf the `$PASSWORD` variable is set, it will define the password for\nthe `k8s` user.\n\nIf the `$AUTHORIZED_KEYS` variable is set, it should hold one or multiple\nSSH public keys (one per line), and these keys will be added to the\n`~/.ssh/authorized_keys` file.\n\nIf neither `$PASSWORD` nor `$AUTHORIZED_KEYS` is set, then a random\npassword will be generated. By default, that password will be 20 characters\nlong, using digits, lowercase, and uppercase letters.\n\nIt is possible to change the length of the generated password by setting\nthe variable `$GENERATE_PASSWORD_LENGTH`. If that variable is set to `0`,\nno password will be generated.\n\n⚠️ When a password is generated, it is displayed on stdout. This means\nthat if someone has access to the logs of the container, they will be\nable to see that password.\n\n⚠️ If the container restarts for any reason, a new password will be\ngenerated. This is considered to be a feature.\n\nWhen using shpod as part of a larger system, it is advised to set the\npassword (or the SSH keys) to avoid both warnings above.\n\n\n## Kubernetes permissions\n\nShpod is meant to be used inside Kubernetes clusters. 
Once you are\nrunning inside shpod, Kubernetes commands (like `kubectl` or `helm`)\nwill use \"in-cluster configuration\"; in other words, these commands\nwill use the ServiceAccount of the Pod that runs shpod.\n\nBy default, on most clusters, that ServiceAccount won't have many\npermissions, meaning that you will get errors like the following one:\n\n```console\n$ kubectl get pods\nError from server (Forbidden): pods is forbidden: User \"system:serviceaccount:default:default\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"\n```\n\nIf you want to use Kubernetes commands within shpod, you need\nto give permissions to that ServiceAccount.\n\nAssuming that you are running shpod in the `default` namespace\nand with the `default` ServiceAccount, you can run the following\ncommand to give `cluster-admin` privileges (=all privileges) to\nthe commands running in shpod:\n\n```bash\nkubectl create clusterrolebinding shpod \\\n        --clusterrole=cluster-admin \\\n        --serviceaccount=default:default\n```\n\n\n## Special handling of kubeconfig\n\nIf you have a ConfigMap named `kubeconfig` in the Namespace\nwhere shpod is running, it will extract the first file from\nthat ConfigMap and use it to populate `~/.kube/config`.\n\nThis lets you inject a custom kubeconfig file into shpod.\n\n\n## Helm chart\n\nSince November 2024, shpod also has a Helm chart!\n\nThis Helm chart offers the following features:\n\n- enable or disable the SSH server (depending on your needs)\n- put the `k8s` user home directory on a Persistent Volume\n- list Roles and ClusterRoles to bind to the ServiceAccount\n\nHere's an example of how to use it:\n\n```bash\nhelm upgrade --install --repo https://shpod.in/ shpod shpod \\\n  --set service.type=NodePort \\\n  --set resources.requests.cpu=0.1 \\\n  --set resources.requests.memory=500M \\\n  --set resources.limits.cpu=1 \\\n  --set resources.limits.memory=500M \\\n  --set persistentVolume.enabled=true \\\n  --set \"rbac.cluster.clusterRoles={cluster-admin}\" \\\n  --set ssh.authorized_keys=\"$(cat ~/.ssh/*.pub)\" \\\n  #\n```\n\n\n## I don't like Helm charts!\n\nYou can also use the following YAML manifest:\n\n```bash\nkubectl apply -f https://shpod.in/shpod.yaml\n```\n\nThen attach to the shpod pod:\n\n```bash\nkubectl attach --namespace=shpod -ti shpod\n```\n\nBut you really should use the Helm chart instead.\n\n\n## Why should I use the Helm chart?\n\nI'm using shpod when teaching Kubernetes classes. I deploy a Kubernetes\ncluster for each student, and they access the cluster by connecting with\nSSH. In some cases, I deploy the clusters with `kubeadm` on top of \"raw\"\nVMs, and the students connect directly to the nodes. In some cases, I'm\nusing managed Kubernetes clusters, and SSH access to the nodes may or\nmay not be possible; in any case, it will require different steps for\neach cloud provider. To simplify things, I built shpod, and use it to\nrun an SSH server that the students connect to.\n\nThis approach works great for most Kubernetes classes, but there are a\nfew scenarios that are problematic; specifically, when the Node running\nshpod is starved for resources, the shpod Pod might get evicted. This\ncauses all the files in the container to be deleted, which is not great\nwhen it happens during a class.\n\nThe solution to that problem has multiple layers:\n\n1. Specify resource requests and limits, in particular for memory, to\n   avoid the pod being evicted by memory pressure on the node.\n2. 
Place the `k8s` user home directory on a Persistent Volume, so that\n   the content of the home directory isn't lost if the Pod gets evicted\n   anyway or the underlying Node crashes or gets removed for any reason.\n3. Make that Persistent Volume optional, so that shpod still works on\n   clusters that don't have a Storage Class providing dynamic volume\n   provisioning. In that case, fall back gracefully to an `emptyDir`\n   volume, to prevent pod eviction by `kubectl drain` or by the cluster\n   autoscaler, and to persist files across container restarts.\n\nThe Helm chart lets you pick easily which configuration works best for\nyou: with or without the SSH server, with or without a password or SSH\npublic keys, with or without a Persistent Volume, with or without\nresource requests and limits...\n\n## Experimental stuff\n\nYou can enable code-server (basically \"VScode used from a browser\")\nand expose it over a `NodePort` like so:\n\n```bash\nhelm upgrade --install --repo https://shpod.in/ shpod shpod \\\n  --set codeServer.enabled=true \\\n  --set persistentVolume.enabled=true \\\n  --set rbac.cluster.clusterRoles=\"{cluster-admin}\" \\\n  --set resources.requests.cpu=0.1 \\\n  --set resources.requests.memory=500M \\\n  --set resources.limits.cpu=1 \\\n  --set resources.limits.memory=500M \\\n  --set service.type=NodePort \\\n  --set ssh.password=codeserver.support.is.beta.and.will.break\nkubectl wait deployment shpod --for=condition=Available\n```\n\nThis is super experimental; I'd like to refactor the image and the\nHelm chart before going further. So if you use this, you should expect\nit to break in the near future.\n\n"
  },
  {
    "path": "addmount.c",
    "content": "/*\n * This was taken from https://github.com/justincormack/addmount\n */\n\n#define _GNU_SOURCE\n#include <unistd.h>\n#include <fcntl.h>\n#include <sched.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <sys/mount.h>\n#include <sys/syscall.h>\n\n#ifndef O_PATH\n#define O_PATH 010000000\n#endif\n\nint open_tree(int dirfd, const char *pathname, unsigned int flags) {\n\treturn syscall(428, dirfd, pathname, flags);\n}\n\n#define OPEN_TREE_CLONE 1\n#define AT_RECURSIVE 0x8000\n\nint move_mount(int from_dirfd, const char *from_pathname, int to_dirfd, const char *to_pathname, unsigned int flags) {\n\treturn syscall(429, from_dirfd, from_pathname, to_dirfd, to_pathname, flags);\n}\n\n#define MOVE_MOUNT_F_SYMLINKS\t\t0x00000001\n#define MOVE_MOUNT_F_AUTOMOUNTS\t\t0x00000002\n#define MOVE_MOUNT_F_EMPTY_PATH\t\t0x00000004\n#define MOVE_MOUNT_T_SYMLINKS\t\t0x00000010\n#define MOVE_MOUNT_T_AUTOMOUNTS\t\t0x00000020\n#define MOVE_MOUNT_T_EMPTY_PATH\t\t0x00000040\n\nint main(int argc, char *argv[]) {\n\tif (argc != 5) {\n\t\tprintf(\"Usage %s src_pid src_path dst_pid dst_path\\n\", argv[0]);\n\t\texit(1);\n\t}\n\tconst char *spid = argv[1];\n\tconst char *src = argv[2];\n\tconst char *dpid = argv[3];\n\tconst char *dst = argv[4];\n\n\t// source mount namespace path\n        char smpath[128];\n        snprintf(smpath, 128, \"/proc/%s/ns/mnt\", spid);\n\n\t// source mount namespace fd\n        int smfd = open(smpath, O_RDONLY);\n        if (smfd == -1) {\n                perror(\"open source mount namespace\");\n                exit(1);\n        }\n\n\t// destination mlunt namespace path\n        char dmpath[128];\n        snprintf(dmpath, 128, \"/proc/%s/ns/mnt\", dpid);\n\n\t// destination mount namespace fd\n        int dmfd = open(dmpath, O_RDONLY);\n        if (dmfd == -1) {\n                perror(\"open destination mount namespace\");\n                exit(1);\n        }\n\n\t// enter source mount namespace\n        if (setns(smfd, CLONE_NEWNS) == -1) {\n                perror(\"setns source\");\n                exit(1);\n        }\n\tclose(smfd);\n\n\t// this creates a file descriptor equavalent to the mount --rbind tree at the source path\n\tint fd = open_tree(AT_FDCWD, src, OPEN_TREE_CLONE|AT_RECURSIVE);\n\tif (fd == -1) {\n\t\tif (errno == ENOSYS) {\n\t\t\tprintf(\"open_tree ENOSYS: you need kernel 5.2 to run this code, please upgrade\\n\");\n\t\t}\n\t\tperror(\"open_tree\");\n\t\texit(1);\n\t}\n\n\t// enter destination mount namespace\n\tif (setns(dmfd, CLONE_NEWNS) == -1) {\n\t\tperror(\"setns destination\");\n\t\texit(1);\n\t}\n\tclose(dmfd);\n\n\t// move the mount tree to the new path\n\tint e = move_mount(fd, \"\", AT_FDCWD, dst, MOVE_MOUNT_F_EMPTY_PATH);\n\tif (e == -1) {\n\t\tperror(\"move_mount\");\n\t\texit(1);\n\t}\n\n\tclose(fd);\n\n\treturn 0;\n}\n"
  },
  {
    "path": "bash_profile",
    "content": ". ~/.bashrc\n"
  },
  {
    "path": "bashrc",
    "content": "# In theory, ~/.bash_profile only gets loaded for interactive login shells,\n# meaning that it should run only once per session. It makes it the ideal\n# place to start e.g. ssh-agent and do other one-time, expensive operations.\n# On the other hand, aliases have to be defined in each shell, so they\n# would typically be defined in ~/.bashrc. ~/.bashrc is also ideal for\n# environment variables like PS1, or variables that we might want to redefine\n# easily, since ~/.bashrc gets reloaded in each shell. Since ~/.bashrc isn't\n# loaded in login shells, though, it makes sense to load it automatically\n# at the end of ~/.bash_profile.\n#\n# With all that said, though, this will run in containers, and we can't be\n# sure that there will be a proper login shell (for instance, if you run\n# \"kubectl exec -ti <pod> -- bash\" or \"docker exec -ti <container> bash\"\n# that will be a non-login interactive shell). Furthermore, when a shell is\n# executed from code-server, it uses a kind of special script to reproduce\n# the same default behavior (difference between login and non-login shells)\n# but I don't know how much we can rely on that.\n#\n# It looks like the best course of action would be to run everything in\n# ~/.bashrc, and invoke ~/.bashrc from ~/.bash_profile (or even make them\n# identical with a symlink). We can revise that strategy later if needed.\n\n###############################################################################\n# First, if we don't have a kubeconfig file, let's create one.\n# (This is necessary for kube_ps1 to operate correctly.)\nif ! [ -f ~/.kube/config ]; then\n  # If there is a ConfigMap named 'kubeconfig',\n  # extract the kubeconfig file from there.\n  # We need to access the Kubernetes API, so we'll do it\n  # using the well-known endpoint.\n  (\n    # Make sure that the file will have locked-down permissions.\n    # (Some tools like Helm will complain about it otherwise.)\n    umask 077\n    export KUBERNETES_SERVICE_HOST=kubernetes.default.svc\n    export KUBERNETES_SERVICE_PORT=443\n    if kubectl get configmap kubeconfig >&/dev/null; then\n      echo \"✏️ Downloading ConfigMap kubeconfig to .kube/config.\"\n      kubectl get configmap kubeconfig -o json |\n        jq -r '.data | to_entries | .[0].value' > ~/.kube/config\n    else\n      SADIR=/var/run/secrets/kubernetes.io/serviceaccount\n      # If we have a ServiceAccount token, use it.\n      if [ -r $SADIR/token ]; then\n        echo \"✏️ Generating .kube/config using ServiceAccount token.\"\n        kubectl config set-cluster shpod \\\n                --server=https://kubernetes.default.svc \\\n                --certificate-authority=/$SADIR/ca.crt\n        kubectl config set-credentials shpod \\\n                --token=$(cat $SADIR/token )\n        kubectl config set-context shpod \\\n                --cluster=shpod \\\n                --user=shpod\n        kubectl config use-context shpod\n      fi\n    fi\n  )\nfi\n# Note that we could also just set the following variables:\n#export KUBERNETES_SERVICE_HOST=kubernetes.default.svc\n#export KUBERNETES_SERVICE_PORT=443\n# ...But for some reason, that doesn't work with impersonation.\n# (i.e. 
using \"kubectl get pods --as=someone.else\")\n\n###############################################################################\n# Now, let's try some xterm magic to figure out if we have a light or dark\n# background, and automatically set the kubecolor theme accordingly.\n# Note that some terminals don't implement the special ANSI sequence that\n# we're using. On these terminals, our color detection mechanisms will incur\n# an extra 3 seconds delay when logging in, and kubecolor will be disabled.\n# Affected terminals include:\n# - MacOS Terminal\n# - Linux virtual consoles\nif [ ! \"$KUBECOLOR_PRESET\" ] && [ ! -f ~/.kube/color.yaml ]; then\n  KUBECOLOR_PRESET=$(\n    success=false\n    exec < /dev/tty\n    oldstty=$(stty -g)\n    stty raw -echo min 0\n    col=11      # background\n    #          OSC   Ps  ;Pt ST\n    echo -en \"\\033]${col};?\\033\\\\\" >/dev/tty  # echo opts differ w/ OSes\n    result=\n    if IFS=';' read -t 2 -r -d '\\' color ; then\n        result=$(echo $color | sed 's/^.*\\;//;s/[^rgb:0-9a-f/]//g')\n        success=true\n    fi\n    stty $oldstty\n    if $success; then\n      lumaformula=$(echo $result | sed 's/rgb:\\(.*\\)\\/\\(.*\\)\\/\\(.*\\)/(2*0x\\1+1*0x\\2+3*0x\\3)\\/6\\/653/')\n      luma=$((lumaformula))\n      if [ \"$luma\" -lt 25 ]; then\n        echo dark\n      elif [ \"$luma\" -gt 75 ]; then\n        echo light\n      else\n        echo unsure\n      fi\n    else\n      echo timeout\n    fi\n  )\n  case \"$KUBECOLOR_PRESET\" in\n  dark|light)\n    echo \"🎨 Automatically setting KUBECOLOR_PRESET=$KUBECOLOR_PRESET.\"\n    export KUBECOLOR_PRESET\n    unset NO_COLOR\n    ;;\n  *)\n    echo \"🎨 Failed to detect terminal background color. KUBECOLOR_PRESET not set.\"\n    unset KUBECOLOR_PRESET\n    export NO_COLOR=kubecolor_disabled\n    ;;\n  esac\nfi\n\n###############################################################################\n# Finally, set up prompt, PATH, completion, history... The classics :)\nif [ -f /etc/HOSTIP ]; then\n  HOSTIP=$(cat /etc/HOSTIP)\nelse\n  HOSTIP=\"0.0.0.0\"\nfi\nKUBE_PS1_PREFIX=\"\"\nKUBE_PS1_SUFFIX=\"\"\nKUBE_PS1_SYMBOL_ENABLE=\"false\"\nKUBE_PS1_CTX_COLOR=\"green\"\nKUBE_PS1_NS_COLOR=\"green\"\nPS1=\"\\e[1m\\e[31m[\\$HOSTIP] \\e[0m(\\$(kube_ps1)) \\e[34m\\u@\\h\\e[35m \\w\\e[0m\\n$ \"\n\nexport EDITOR=vim\nexport PATH=\"$HOME/.krew/bin:$PATH\"\n\nalias k=kubecolor\ncomplete -F __start_kubectl k\n. /usr/share/bash-completion/completions/kubectl.bash\n\nexport HISTSIZE=9999\nexport HISTFILESIZE=9999\nshopt -s histappend\ntrap 'history -a' DEBUG\nexport HISTFILE=~/.history\n\ntrap exit TERM\n\nis_kind_up() {\n  kubectl config get-contexts kind-kind >/dev/null 2>&1\n}\n\nif [ \"$CODESPACES\" = \"true\" ]; then\n  if ! is_kind_up; then\n    echo \"⏳️ KinD cluster isn't ready yet. Please wait.\"\n    echo \"💡 (Or press Ctrl-C if you don't want to wait.)\"\n    while ! is_kind_up; do\n      sleep 1\n    done\n  fi\nfi\n"
  },
  {
    "path": "bore.sh",
    "content": "#!/bin/sh\nset -eu\n\nCONTAINER_NAME=kind-control-plane\nCONTAINER_PID=$(docker inspect $CONTAINER_NAME --format '{{.State.Pid}}')\n\ndocker exec $CONTAINER_NAME touch /borens\naddmount $$ /proc/$$/ns/net $CONTAINER_PID /borens\n\ndocker exec $CONTAINER_NAME sh -c '\nset -e\nCNI_PLUGIN=$(cat /etc/cni/net.d/10-kindnet.conflist | jq -r \".plugins[0].type\")\ncat /etc/cni/net.d/10-kindnet.conflist | jq \".plugins[0] + {name: .name}\" |\nCNI_COMMAND=ADD CNI_CONTAINERID=bore CNI_NETNS=/borens CNI_IFNAME=bore CNI_PATH=/opt/cni/bin \\\n/opt/cni/bin/$CNI_PLUGIN\n' > /tmp/bore.json\n\nGATEWAY=$(jq -r .ip4.gateway < /tmp/bore.json)\n\nip route del default via $GATEWAY\nip route add 10.244.0.0/16 via $GATEWAY\nip route add 10.96.0.0/12 via $GATEWAY\n"
  },
  {
    "path": "build.sh",
    "content": "#!/bin/sh\nmkdir -p build\ncp shpod.sh shpod.yaml build\n\ncd build\nhelm package ../helm/shpod\nhelm repo index .\n\n"
  },
  {
    "path": "dind.sh",
    "content": "#!/bin/sh\nif [ $# = 0 ]; then\n  if ! sudo mountpoint -q /var/lib/docker; then\n    echo \"/var/lib/docker doesn't seem to be a mountpoint.\"\n    echo \"Docker-in-Docker probably won't work. Aborting.\"\n    exit 1\n  fi\n  if lsmod | grep -q ^iptable; then\n    echo \"Detected modules for legacy iptables.\"\n    echo \"Updating iptables to point to legacy binary.\"\n    sudo ln -sf xtables-legacy-multi $(which iptables)\n  fi\n  echo \"Starting Docker Engine in the background (logging to $HOME/docker.log).\"\n  nohup sudo sh -c \"$0 dockerd &\" >$HOME/docker.log\n  exit 0\nfi\n#\n# The rest of this script is taken verbatim from:\n# https://raw.githubusercontent.com/moby/moby/refs/heads/master/hack/dind\n#\nset -e\n\n# DinD: a wrapper script which allows docker to be run inside a docker container.\n# Original version by Jerome Petazzoni <jerome@docker.com>\n# See the blog post: https://www.docker.com/blog/docker-can-now-run-within-docker/\n#\n# This script should be executed inside a docker container in privileged mode\n# ('docker run --privileged', introduced in docker 0.6).\n\n# Usage: dind CMD [ARG...]\n\n# apparmor sucks and Docker needs to know that it's in a container (c) @tianon\n#\n# Set the container env-var, so that AppArmor is enabled in the daemon and\n# containerd when running docker-in-docker.\n#\n# see: https://github.com/containerd/containerd/blob/787943dc1027a67f3b52631e084db0d4a6be2ccc/pkg/apparmor/apparmor_linux.go#L29-L45\n# see: https://github.com/moby/moby/commit/de191e86321f7d3136ff42ff75826b8107399497\nexport container=docker\n\n# Allow AppArmor to work inside the container;\n#\n#     aa-status\n#     apparmor filesystem is not mounted.\n#     apparmor module is loaded.\n#\n#     mount -t securityfs none /sys/kernel/security\n#\n#     aa-status\n#     apparmor module is loaded.\n#     30 profiles are loaded.\n#     30 profiles are in enforce mode.\n#       /snap/snapd/18357/usr/lib/snapd/snap-confine\n#       ...\n#\n# Note: https://0xn3va.gitbook.io/cheat-sheets/container/escaping/sensitive-mounts#sys-kernel-security\n#\n#     ## /sys/kernel/security\n#\n#     In /sys/kernel/security mounted the securityfs interface, which allows\n#     configuration of Linux Security Modules. This allows configuration of\n#     AppArmor policies, and so access to this may allow a container to disable\n#     its MAC system.\n#\n# Given that we're running privileged already, this should not be an issue.\nif [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then\n\tmount -t securityfs none /sys/kernel/security || {\n\t\techo >&2 'Could not mount /sys/kernel/security.'\n\t\techo >&2 'AppArmor detection and --privileged mode might break.'\n\t}\nfi\n\n# Mount /tmp (conditionally)\n# /tmp must be 'exec,rw', and 'dev' to allow mknod to work for the\n# pkg/archive/archive_linux_test.go tests.\nif ! mountpoint -q /tmp; then\n\tmount -t tmpfs none /tmp\nfi\n\n# cgroup v2: enable nesting\nif [ -f /sys/fs/cgroup/cgroup.controllers ]; then\n\t# move the processes from the root group to the /init group,\n\t# otherwise writing subtree_control fails with EBUSY.\n\t# An error during moving non-existent process (i.e., \"cat\") is ignored.\n\tmkdir -p /sys/fs/cgroup/init\n\t# this happens in a loop because things like \"docker exec\" on our dind\n\t# container will create new processes, which creates a race between our\n\t# moving everything to \"init\" and enabling subtree_control\n\twhile ! 
{\n\t\t# move the processes from the root group to the /init group,\n\t\t# otherwise writing subtree_control fails with EBUSY.\n\t\t# An error during moving non-existent process (i.e., \"cat\") is ignored.\n\t\txargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :\n\t\t# enable controllers\n\t\tsed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \\\n\t\t\t> /sys/fs/cgroup/cgroup.subtree_control\n\t}; do true; done\nfi\n\n# Change mount propagation to shared to make the environment more similar to a\n# modern Linux system, e.g. with SystemD as PID 1.\nmount --make-rshared /\n\nif [ $# -gt 0 ]; then\n\texec \"$@\"\nfi\n\necho >&2 'ERROR: No command specified.'\necho >&2 'You probably want to run hack/make.sh, or maybe a shell?'\n"
  },
  {
    "path": "docker-socket.sh",
    "content": "#!/bin/sh\n#\n# This script is not used at the moment (as of the April 2025 changes to\n# add support for devcontainers) but it might be used in the future in\n# an attempt to support \"docker-outside-docker\" instead of \"docker-in-docker\".\n#\nsudo nohup >/dev/null sh -c \"\n  socat unix-listen:/var/run/docker.sock,fork,user=k8s unix-connect:/var/run/docker-host.sock &\n\"\n"
  },
  {
    "path": "helm/shpod/.helmignore",
    "content": "# Patterns to ignore when building packages.\n# This supports shell glob matching, relative path matching, and\n# negation (prefixed with !). Only one pattern per line.\n.DS_Store\n# Common VCS dirs\n.git/\n.gitignore\n.bzr/\n.bzrignore\n.hg/\n.hgignore\n.svn/\n# Common backup files\n*.swp\n*.bak\n*.tmp\n*.orig\n*~\n# Various IDEs\n.project\n.idea/\n*.tmproj\n.vscode/\n"
  },
  {
    "path": "helm/shpod/Chart.yaml",
    "content": "apiVersion: v2\nname: shpod\nversion: 0.2.0\ndescription: Shell in a Pod\nkeywords:\n  - ssh\n  - sshd\n  - shell\ntype: application\nhome: https://github.com/jpetazzo/shpod\nsources:\n  - https://github.com/jpetazzo/shpod\nmaintainers:\n  - name: Jérôme Petazzoni\n    email: jerome.petazzoni@gmail.com\n"
  },
  {
    "path": "helm/shpod/templates/NOTES.txt",
    "content": "{{- if .Values.ssh.enabled }}\nThe SSH server is enabled. You can connect to it with an SSH client.\nUse the following command to see how the SSH server is exposed:\n\nkubectl get service {{ include \"shpod.fullname\" . }} --namespace {{ .Release.Namespace }}\n\nYou can access it with kubectl port-forward, like this:\n\nkubectl port-forward service {{ include \"shpod.fullname\" . }} --namespace {{ .Release.Namespace }} 2222:22\n\n...And then connect using \"ssh -l k8s -p 2222 localhost\".\n{{- else }}\nThe SSH server isn't enabled. You can attach to the shpod shell like this:\n\nkubectl attach -ti deployment/{{ include \"shpod.fullname\" . }} --namespace {{ .Release.Namespace }}\n{{- end }}\n\nYou can also execute a new shpod shell like this:\n\nkubectl exec -ti deployment/{{ include \"shpod.fullname\" . }} --namespace {{ .Release.Namespace }} -- login -f k8s\n"
  },
  {
    "path": "helm/shpod/templates/_helpers.tpl",
    "content": "{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"shpod.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"shpod.fullname\" -}}\n{{- if .Values.fullnameOverride }}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- $name := default .Chart.Name .Values.nameOverride }}\n{{- if contains $name .Release.Name }}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" }}\n{{- else }}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n{{- end }}\n{{- end }}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"shpod.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" }}\n{{- end }}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"shpod.labels\" -}}\nhelm.sh/chart: {{ include \"shpod.chart\" . }}\n{{ include \"shpod.selectorLabels\" . }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"shpod.selectorLabels\" -}}\napp.kubernetes.io/name: {{ include \"shpod.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"shpod.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create }}\n{{- default (include \"shpod.fullname\" .) .Values.serviceAccount.name }}\n{{- else }}\n{{- default \"default\" .Values.serviceAccount.name }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm/shpod/templates/deployment.yaml",
    "content": "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: {{ include \"shpod.fullname\" . }}\n  labels:\n    {{- include \"shpod.labels\" . | nindent 4 }}\nspec:\n  replicas: {{ .Values.replicaCount }}\n  selector:\n    matchLabels:\n      {{- include \"shpod.selectorLabels\" . | nindent 6 }}\n  template:\n    metadata:\n      {{- with .Values.podAnnotations }}\n      annotations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      labels:\n        {{- include \"shpod.labels\" . | nindent 8 }}\n        {{- with .Values.podLabels }}\n        {{- toYaml . | nindent 8 }}\n        {{- end }}\n    spec:\n      {{- with .Values.imagePullSecrets }}\n      imagePullSecrets:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      serviceAccountName: {{ include \"shpod.serviceAccountName\" . }}\n      securityContext:\n        {{- toYaml .Values.podSecurityContext | nindent 8 }}\n      initContainers:\n        - name: copyhome\n          image: \"{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}\"\n          imagePullPolicy: {{ .Values.image.pullPolicy }}\n          volumeMounts:\n            - name: home\n              mountPath: /copyhome\n          command:\n            - cp\n            - -a\n            - /home/k8s/.\n            - /copyhome\n      containers:\n        - name: {{ .Chart.Name }}\n          securityContext:\n            {{- toYaml .Values.securityContext | nindent 12 }}\n          image: \"{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}\"\n          imagePullPolicy: {{ .Values.image.pullPolicy }}\n          {{- if eq .Values.ssh.enabled false }}\n          stdin: true\n          tty: true\n          {{- end }}\n          env:\n            - name: HOSTIP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.hostIP\n            {{- if .Values.ssh.password }}\n            - name: PASSWORD\n              value: \"{{ .Values.ssh.password }}\"\n            {{- end }}\n            {{- if .Values.ssh.authorized_keys }}\n            - name: AUTHORIZED_KEYS\n              value: |\n                {{ .Values.ssh.authorized_keys | nindent 16 }}\n            {{- end }}\n          ports:\n            - name: ssh\n              containerPort: 22\n              protocol: TCP\n          livenessProbe:\n            {{- toYaml .Values.livenessProbe | nindent 12 }}\n          readinessProbe:\n            {{- toYaml .Values.readinessProbe | nindent 12 }}\n          resources:\n            {{- toYaml .Values.resources | nindent 12 }}\n          volumeMounts:\n            - name: home\n              mountPath: /home/k8s\n            {{- with .Values.volumeMounts }}\n              {{- toYaml . 
| nindent 12 }}\n            {{- end }}\n        {{ if .Values.codeServer.enabled }}\n        - name: code-server\n          securityContext:\n            {{- toYaml .Values.securityContext | nindent 12 }}\n          image: \"{{ .Values.image.repository }}:vspod\"\n          imagePullPolicy: {{ .Values.image.pullPolicy }}\n          env:\n            - name: HOSTIP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.hostIP\n            {{- if ( .Values.codeServer.password | default .Values.ssh.password ) }}\n            - name: PASSWORD\n              value: \"{{ .Values.codeServer.password | default .Values.ssh.password }}\"\n            {{- end }}\n          ports:\n            - name: code-server\n              containerPort: {{ .Values.codeServer.containerPort }}\n              protocol: TCP\n          resources:\n            {{- toYaml .Values.codeServer.resources | nindent 12 }}\n          volumeMounts:\n            - name: home\n              mountPath: /home/k8s\n            {{- with .Values.volumeMounts }}\n              {{- toYaml . | nindent 12 }}\n            {{- end }}\n      {{ end }}\n      volumes:\n        - name: home\n          {{- if .Values.persistentVolume.enabled }}\n          persistentVolumeClaim:\n            claimName: {{ include \"shpod.fullname\" . }}\n          {{- end }}\n        {{- with .Values.volumes }}\n          {{- toYaml . | nindent 8 }}\n        {{- end }}\n      {{- with .Values.nodeSelector }}\n      nodeSelector:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.affinity }}\n      affinity:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n      {{- with .Values.tolerations }}\n      tolerations:\n        {{- toYaml . | nindent 8 }}\n      {{- end }}\n"
  },
  {
    "path": "helm/shpod/templates/persistentvolumeclaim.yaml",
    "content": "{{- if .Values.persistentVolume.enabled -}}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: {{ include \"shpod.fullname\" . }}\n  labels:\n    {{- include \"shpod.labels\" . | nindent 4 }}\n  {{- with .Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nspec:\n  accessModes:\n    {{ .Values.persistentVolume.accessModes | toYaml | nindent 4 }}\n  resources:\n    requests:\n      storage: {{ .Values.persistentVolume.size }}\n  {{- with .Values.persistentVolume.storageClass }}\n  storageClassName: {{ . }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm/shpod/templates/rbac.yaml",
    "content": "{{- if .Values.rbac.enabled -}}\n{{- range .Values.rbac.cluster.clusterRoles }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: {{\n    printf \"%s-%s-%s\" \n    $.Release.Namespace (include \"shpod.fullname\" $) .\n    }}\n  labels:\n    {{- include \"shpod.labels\" $ | nindent 4 }}\n  {{- with $.Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: {{ . }}\nsubjects:\n- kind: ServiceAccount\n  name: {{ include \"shpod.serviceAccountName\" $ }}\n  namespace: {{ $.Release.Namespace }}\n{{- end }}\n{{- range .Values.rbac.namespace.clusterRoles }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{\n    printf \"%s-clusterrole-%s\" \n    (include \"shpod.fullname\" $) .\n    }}\n  labels:\n    {{- include \"shpod.labels\" $ | nindent 4 }}\n  {{- with $.Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: {{ . }}\nsubjects:\n- kind: ServiceAccount\n  name: {{ include \"shpod.serviceAccountName\" $ }}\n  namespace: {{ $.Release.Namespace }}\n{{- end }}\n{{- range .Values.rbac.namespace.roles }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{\n    printf \"%s-role-%s\" \n    (include \"shpod.fullname\" $) .\n    }}\n  labels:\n    {{- include \"shpod.labels\" $ | nindent 4 }}\n  {{- with $.Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: {{ . }}\nsubjects:\n- kind: ServiceAccount\n  name: {{ include \"shpod.serviceAccountName\" $ }}\n  namespace: {{ $.Release.Namespace }}\n{{- end }}\n{{- end }}\n"
  },
  {
    "path": "helm/shpod/templates/rolebinding.yaml",
    "content": ""
  },
  {
    "path": "helm/shpod/templates/service.yaml",
    "content": "apiVersion: v1\nkind: Service\nmetadata:\n  name: {{ include \"shpod.fullname\" . }}\n  labels:\n    {{- include \"shpod.labels\" . | nindent 4 }}\nspec:\n  type: {{ .Values.service.type }}\n  ports:\n    - port: {{ .Values.service.port }}\n      targetPort: ssh\n      protocol: TCP\n      name: ssh\n  {{ if .Values.codeServer.enabled }}\n    - port: {{ .Values.codeServer.servicePort }}\n      targetPort: {{ .Values.codeServer.containerPort }}\n      protocol: TCP\n      name: code-server\n  {{ end }}\n  selector:\n    {{- include \"shpod.selectorLabels\" . | nindent 4 }}\n"
  },
  {
    "path": "helm/shpod/templates/serviceaccount.yaml",
    "content": "{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ include \"shpod.serviceAccountName\" . }}\n  labels:\n    {{- include \"shpod.labels\" . | nindent 4 }}\n  {{- with .Values.serviceAccount.annotations }}\n  annotations:\n    {{- toYaml . | nindent 4 }}\n  {{- end }}\nautomountServiceAccountToken: {{ .Values.serviceAccount.automount }}\n{{- end }}\n"
  },
  {
    "path": "helm/shpod/values.yaml",
    "content": "# Default values for shpod.\n# This is a YAML-formatted file.\n# Declare variables to be passed into your templates.\n\n# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/\nreplicaCount: 1\n\n# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/\nimage:\n  repository: ghcr.io/jpetazzo/shpod\n  # This sets the pull policy for images.\n  pullPolicy: IfNotPresent\n  # Overrides the image tag whose default is the chart appVersion.\n  tag: latest\n\n# This is for the secretes for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\nimagePullSecrets: []\n# This is to override the chart name.\nnameOverride: \"\"\nfullnameOverride: \"\"\n\n#This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/\nserviceAccount:\n  # Specifies whether a service account should be created\n  create: true\n  # Automatically mount a ServiceAccount's API credentials?\n  automount: true\n  # Annotations to add to the service account\n  annotations: {}\n  # The name of the service account to use.\n  # If not set and create is true, a name is generated using the fullname template\n  name: \"\"\n\n# This is for setting Kubernetes Annotations to a Pod.\n# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ \npodAnnotations: {}\n# This is for setting Kubernetes Labels to a Pod.\n# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\npodLabels: {}\n\npodSecurityContext: {}\n  # fsGroup: 2000\n\nsecurityContext: {}\n  # capabilities:\n  #   drop:\n  #   - ALL\n  # readOnlyRootFilesystem: true\n  # runAsNonRoot: true\n  # runAsUser: 1000\n\n# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/\nservice:\n  # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types\n  type: ClusterIP\n  # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports\n  port: 22\n\nresources: {}\n  # We usually recommend not to specify default resources and to leave this as a conscious\n  # choice for the user. This also increases chances charts run on environments with little\n  # resources, such as Minikube. 
If you do want to specify resources, uncomment the following\n  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.\n  # limits:\n  #   cpu: 100m\n  #   memory: 128Mi\n  # requests:\n  #   cpu: 100m\n  #   memory: 128Mi\n\n# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/\nlivenessProbe:\nreadinessProbe:\n\n# Additional volumes on the output Deployment definition.\nvolumes: []\n# - name: foo\n#   secret:\n#     secretName: mysecret\n#     optional: false\n\n# Additional volumeMounts on the output Deployment definition.\nvolumeMounts: []\n# - name: foo\n#   mountPath: \"/etc/foo\"\n#   readOnly: true\n\nnodeSelector: {}\n\ntolerations: []\n\naffinity: {}\n\n# These values are inspired by the ones in the Prometheus chart.\n# (https://artifacthub.io/packages/helm/prometheus-community/prometheus)\npersistentVolume:\n  ## If true, we will create and use a PVC for $HOME.\n  ## If false, we'll use an emptyDir instead.\n  enabled: false\n  ## The remaining values are used only when \"enabled\" is true.\n  accessModes:\n    - ReadWriteOnce\n  size: 1G\n  storageClass: null\n\nrbac:\n  ## If rbac.enabled=false:\n  ## no RoleBinding or ClusterRoleBinding will be created.\n  enabled: true\n  cluster:\n    ## rbac.cluster.clusterRoles:\n    ## list of ClusterRoles that should be granted to the ServiceAccount, cluster-wide.\n    clusterRoles: []\n  namespace:\n    ## rbac.namespace.clusterRoles:\n    ## list of ClusterRoles that should be granted to the ServiceAccount, only in the application Namespace.\n    clusterRoles: [ view ]\n    ## rbac.namespace.roles:\n    ## list of Roles that should be granted to the ServiceAccount in the application Namespace.\n    roles: []\n\nssh:\n  ## If SSH is enabled, you can connect to shpod with an SSH client\n  ## or with \"kubectl exec\".\n  ## If SSH is disabled, you cannot connect to shpod with SSH,\n  ## but you can use \"kubectl exec\" or \"kubectl attach\".\n  enabled: true\n  ## If authorized_keys is set, it will be added to the k8s account\n  ## ~/.ssh/authorized_keys file. (It should be a string; for multiple\n  ## keys, use a multi-line string.)\n  authorized_keys: \"\"\n  ## If password is set, it will be used to set the password for the k8s user.\n  password: \"\"\n  ## If neither authorized_keys nor password is set, a random password will be generated.\n\ncodeServer:\n  ## If code-server is enabled, an extra container will be added in the Pod.\n  ## That container will run code-server (basically VScode in a browser).\n  ## An extra port will be added to the shpod Service.\n  enabled: false\n  servicePort: 80\n  containerPort: 1789\n  ## If the password is blank, it will default to ssh.password.\n  password: \"\"\n  resources: {}\n"
  },
  {
    "path": "helper-curl",
    "content": "#!/bin/sh\n\nset -e\n\nTYPE=$1\nBIN_OR_ARGS=$2\nURL=$3\n\ncase $TARGETARCH in\namd64)\n  GOARCH=amd64\n  UARCH=x86_64\n  WTFARCH=x86_64\n  CODERARCH=amd64\n  ;;\narm64)\n  GOARCH=arm64\n  UARCH=aarch64\n  WTFARCH=arm64\n  CODERARCH=arm64\n  ;;\narm)\n  GOARCH=arm\n  UARCH=armv7\n  WTFARCH=arm\n  CODERARCH=armv7l\n  ;;\n*)\n  echo \"Unsupported architecture: $TARGETARCH.\"\n  GOARCH=$TARGETARCH\n  UARCH=$TARGETARCH\n  WTFARCH=$TARGETARCH\n  CODERARCH=$TARGETARCH\n  ;;\nesac\n\nmangle() {\n  echo $1 | sed \\\n  -e s/@GOARCH/$GOARCH/g \\\n  -e s/@UARCH/$UARCH/g \\\n  -e s/@WTFARCH/$WTFARCH/g \\\n  -e s/@CODERARCH/$CODERARCH/g \\\n  #\n}\n\nURL=$(mangle $URL)\nBIN_OR_ARGS=$(mangle \"$BIN_OR_ARGS\")\n\nif ! curl -fsSLI $URL >/dev/null; then\n  echo \"URL not found: $URL\"\n  BIN=${BIN_OR_ARGS##*/}\n  echo \"Installing placeholder: $BIN\"\n  cp /bin/helper-unsupported /usr/local/bin/$BIN\n  exit 0\nfi\n\ncase \"$TYPE\" in\nbin)\n  BIN=$BIN_OR_ARGS\n  curl -fsSL $URL > /usr/local/bin/$BIN\n  chmod +x /usr/local/bin/$BIN\n  ;;\ntar)\n  ARGS=$BIN_OR_ARGS\n  curl -fsSL $URL | tar -zxvf- -C /usr/local/bin $ARGS\n  ;;\n*)\n  echo \"Unrecognized download type: $TYPE\"\n  exit 1\n  ;;\nesac\n"
  },
  {
    "path": "helper-unsupported",
    "content": "#!/bin/sh\necho \"# ⚠️ $0 is not supported on this platform ($(uname -m)).\"\n"
  },
  {
    "path": "init.sh",
    "content": "#!/usr/bin/env bash\nset -e\n\n# If there is a tty, give us a shell.\n# (This happens e.g. when we do \"docker run -ti jpetazzo/shpod\".)\n# Otherwise, start an SSH server.\n# (This happens e.g. when we use that image in a Pod in a Deployment.)\n\nif tty >/dev/null; then\n  exec login -f k8s\nelse\n  if ! [ -f /etc/ssh/ssh_host_rsa_key ]; then\n    ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N \"\"\n  fi\n  if [ \"$AUTHORIZED_KEYS\" ]; then\n    echo 'Environment variable $AUTHORIZED_KEYS found. Adding keys.'\n    sudo -u k8s mkdir -p ~k8s/.ssh\n    sudo -u k8s touch ~k8s/.ssh/authorized_keys\n    while read KEY; do\n      if [ \"$KEY\" ] && ! grep -q \"$KEY\" ~k8s/.ssh/authorized_keys; then\n        echo \"$KEY\" >> ~k8s/.ssh/authorized_keys\n      fi\n    done <<< \"$AUTHORIZED_KEYS\"\n  fi\n  if [ \"$PASSWORD\" ]; then\n    echo 'Environment variable $PASSWORD found. Setting user password.'\n  else\n    if [ ! \"$AUTHORIZED_KEYS\" -a \"${GENERATE_PASSWORD_LENGTH-0}\" -gt 0 ]; then\n      echo 'Environment variable $PASSWORD not found. Generating a password.'\n      PASSWORD=$(base64 /dev/urandom | tr -d +/ | head -c $GENERATE_PASSWORD_LENGTH)\n      echo \"PASSWORD=$PASSWORD\"\n    else\n      echo 'Environment variable $PASSWORD not found. User password will not be set.'\n    fi\n  fi\n  if [ \"$PASSWORD\" ]; then\n    echo \"k8s:$PASSWORD\" | chpasswd\n  fi\n  exec /usr/sbin/sshd -D -e\nfi\n\n"
  },
  {
    "path": "kind.sh",
    "content": "#!/bin/sh\n#\n# This script tries to create a KinD cluster and then add\n# a couple of routes so that the pod CIDR and the service\n# CIDR are directly rechable from the local machine.\n# This simplifies the Kubernetes learning experience, as\n# pods and services become reachable directly from the\n# local machine, without having to use port forwarding or\n# other mechanisms. Note, however, that it only works on\n# Linux machines!\n#\nkubectl config get-contexts kind-kind || kind create cluster\ndocker exec kind-control-plane true || docker start kind-control-plane\nNODE_ADDR=$(\n  docker inspect kind-control-plane |\n  jq -r .[].NetworkSettings.Networks.kind.IPAddress\n)\nsudo ip route add 10.244.0.0/24 via $NODE_ADDR\nsudo ip route add 10.96.0.0/12 via $NODE_ADDR\n\n"
  },
  {
    "path": "motd",
    "content": "\n🐚 Welcome to shpod - SHell in a POD.\n🔎 Check \"/versions.txt\" to see the list of included tools.\n🔗 See https://github.com/jpetazzo/shpod for more information.\n📦️ You can install extra packages with 'sudo apk add PKGNAME'.\n\n"
  },
  {
    "path": "netlify.toml",
    "content": "[build]\n  publish = \"build/\"\n  command = \"./build.sh\"\n\n[[redirects]]\n  from = \"/\"\n  to = \"/shpod.sh\"\n  status = 200\n\n"
  },
  {
    "path": "setup-tailhist.sh",
    "content": "#!/bin/sh\nset -ex\nmkdir /tmp/tailhist\ncd /tmp/tailhist\nWEBSOCKETD_VERSION=0.4.1\nwget https://github.com/joewalnes/websocketd/releases/download/v$WEBSOCKETD_VERSION/websocketd-$WEBSOCKETD_VERSION-linux_amd64.zip\nunzip websocketd-$WEBSOCKETD_VERSION-linux_amd64.zip\ncurl https://raw.githubusercontent.com/jpetazzo/container.training/main/prepare-labs/lib/tailhist.html > index.html\nkubectl patch service shpod --namespace shpod -p \"\nspec:\n  ports:\n  - name: tailhist\n    port: 1088\n    targetPort: 1088\n    nodePort: 30088\n    protocol: TCP\n\"\n./websocketd --port=1088 --staticdir=. sh -c \"\n  tail -n +1 -f $HOME/.history ||\n  echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\n  \"  \n"
  },
  {
    "path": "shpod.sh",
    "content": "#!/bin/sh\n# For more information about shpod, check it out on GitHub:\n# https://github.com/jpetazzo/shpod\nif [ -f shpod.yaml ]; then\n  YAML=shpod.yaml\nelse\n  YAML=https://raw.githubusercontent.com/jpetazzo/shpod/main/shpod.yaml\nfi\nif [ \"$(kubectl get pod --namespace=shpod shpod --ignore-not-found -o jsonpath={.status.phase})\" = \"Running\" ]; then\n  echo \"Shpod is already running. Starting a new shell with 'kubectl exec'.\"\n  echo \"(Note: if the main invocation of shpod exits, all others will be terminated.)\"\n  kubectl exec -ti --namespace=shpod shpod -- bash -l\n  if [ $? = 137 ]; then\n    echo \"Shpod was terminated by SIGKILL. This will happen when the main invocation\"\n    echo \"of shpod exits (all processes started by 'kubectl exec' are then terminated).\"\n  fi\n  exit 0\nfi\necho \"Applying YAML: $YAML...\"\nkubectl apply -f $YAML\necho \"Waiting for pod to be ready...\"\nkubectl wait --namespace=shpod --for condition=Ready pod/shpod\necho \"Attaching to the pod...\"\nkubectl attach --namespace=shpod -ti shpod </dev/tty\necho \"Deleting pod...\"\necho \"\nNote: it's OK to press Ctrl-C if this takes too long and you're impatient.\nClean up will continue in the background. However, if you want to restart\nshpod, you might have to wait a bit (about 30 seconds).\n\"\nkubectl delete -f $YAML --now\necho \"Done.\"\n"
  },
  {
    "path": "shpod.yaml",
    "content": "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: shpod\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: shpod\n  namespace: shpod\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: shpod\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: cluster-admin\nsubjects:\n- kind: ServiceAccount\n  name: shpod\n  namespace: shpod\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: shpod\n  namespace: shpod\nspec:\n  serviceAccountName: shpod\n  containers:\n  - name: shpod\n    image: jpetazzo/shpod\n    stdin: true\n    tty: true\n    env:\n    - name: HOSTIP\n      valueFrom:\n        fieldRef:\n          fieldPath: status.hostIP\n"
  },
  {
    "path": "tmux.conf",
    "content": "set -g status-style bg=blue,fg=white,bold\nset-option -g history-limit 1000000\n"
  },
  {
    "path": "vimrc",
    "content": "syntax on\nset autoindent\nset expandtab\nset number\nset shiftwidth=2\nset softtabstop=2\nset nowrap\n"
  }
]